code
stringlengths
82
54.1k
code_codestyle
int64
0
699
style_context
stringlengths
111
35.6k
style_context_codestyle
int64
0
699
label
int64
0
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) _lowerCamelCase : List[Any] = { """configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Dict = [ """GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""", """GPTBigCodeForSequenceClassification""", """GPTBigCodeForTokenClassification""", """GPTBigCodeForCausalLM""", """GPTBigCodeModel""", """GPTBigCodePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_bigcode import ( GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTBigCodeForCausalLM, GPTBigCodeForSequenceClassification, GPTBigCodeForTokenClassification, GPTBigCodeModel, GPTBigCodePreTrainedModel, ) else: import sys _lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
87
from __future__ import annotations import requests _lowerCamelCase : str = set( """approved_at_utc approved_by author_flair_background_color author_flair_css_class author_flair_richtext author_flair_template_id author_fullname author_premium can_mod_post category clicked content_categories created_utc downs edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta is_original_content is_reddit_media_domain is_video link_flair_css_class link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title name permalink pwls quarantine saved score secure_media secure_media_embed selftext subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type total_awards_received ups upvote_ratio url user_reports""".split() ) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ = 1 , lowercase_ = "new" , lowercase_ = None ) -> dict: """simple docstring""" A__ = wanted_data or [] if invalid_search_terms := ", ".join(sorted(set(lowercase_ ) - valid_terms ) ): A__ = f"""Invalid search term: {invalid_search_terms}""" raise ValueError(lowercase_ ) A__ = requests.get( f"""https://reddit.com/r/{subreddit}/{age}.json?limit={limit}""" , headers={'''User-agent''': '''A random string'''} , ) if response.status_code == 429: raise requests.HTTPError A__ = response.json() if not wanted_data: return {id_: data["data"]["children"][id_] for id_ in range(lowercase_ )} A__ = {} for id_ in range(lowercase_ ): A__ = { item: data['''data''']['''children'''][id_]['''data'''][item] for item in wanted_data } return data_dict if __name__ == "__main__": # If you get Error 429, that means you are rate limited.Try after some time print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
87
1
from __future__ import annotations from collections import namedtuple def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> tuple: """simple docstring""" A__ = namedtuple('''result''' , '''name value''' ) if (voltage, current, power).count(0 ) != 1: raise ValueError('''Only one argument must be 0''' ) elif power < 0: raise ValueError( '''Power cannot be negative in any electrical/electronics system''' ) elif voltage == 0: return result('''voltage''' , power / current ) elif current == 0: return result('''current''' , power / voltage ) elif power == 0: return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) ) else: raise ValueError('''Exactly one argument must be 0''' ) if __name__ == "__main__": import doctest doctest.testmod()
87
import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = JukeboxTokenizer UpperCAmelCase__ = { '''artist''': '''Zac Brown Band''', '''genres''': '''Country''', '''lyrics''': '''I met a traveller from an antique land, Who said "Two vast and trunkless legs of stone Stand in the desert. . . . Near them, on the sand, Half sunk a shattered visage lies, whose frown, And wrinkled lip, and sneer of cold command, Tell that its sculptor well those passions read Which yet survive, stamped on these lifeless things, The hand that mocked them, and the heart that fed; And on the pedestal, these words appear: My name is Ozymandias, King of Kings; Look on my Works, ye Mighty, and despair! Nothing beside remains. Round the decay Of that colossal Wreck, boundless and bare The lone and level sands stretch far away ''', } @require_torch def SCREAMING_SNAKE_CASE ( self : Dict) ->int: '''simple docstring''' import torch A__ = JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''') A__ = tokenizer(**self.metas)['''input_ids'''] # fmt: off A__ = [ torch.tensor([[ 0, 0, 0, 7_169, 507, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67, 78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]]), torch.tensor([[0, 0, 0, 1_069, 11]]), torch.tensor([[0, 0, 0, 1_069, 11]]), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0])) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1])) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2])) @require_torch def SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[int]: '''simple docstring''' import torch A__ = JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''') A__ = tokenizer(**self.metas)['''input_ids'''] # fmt: off A__ = [ torch.tensor([[ 0, 0, 0, 1_069, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]]), torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]]), torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]]), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0])) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1])) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2]))
87
1
import argparse import torch from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> str: """simple docstring""" if openai_config_file == "": A__ = OpenAIGPTConfig() else: A__ = OpenAIGPTConfig.from_json_file(lowercase_ ) A__ = OpenAIGPTModel(lowercase_ ) # Load weights from numpy load_tf_weights_in_openai_gpt(lowercase_ , lowercase_ , lowercase_ ) # Save pytorch-model A__ = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME A__ = pytorch_dump_folder_path + '''/''' + CONFIG_NAME print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" ) torch.save(model.state_dict() , lowercase_ ) print(f"""Save configuration file to {pytorch_config_dump_path}""" ) with open(lowercase_ , '''w''' , encoding='''utf-8''' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": _lowerCamelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( """--openai_checkpoint_folder_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--openai_config_file""", default="""""", type=str, help=( """An optional config json file corresponding to the pre-trained OpenAI model. \n""" """This specifies the model architecture.""" ), ) _lowerCamelCase : Optional[Any] = parser.parse_args() convert_openai_checkpoint_to_pytorch( args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path )
87
from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCamelCase : str = logging.get_logger(__name__) _lowerCamelCase : List[str] = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""} class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''openai-gpt''' UpperCAmelCase__ = { '''max_position_embeddings''': '''n_positions''', '''hidden_size''': '''n_embd''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : Union[str, Any] , UpperCAmelCase__ : Dict=40_478 , UpperCAmelCase__ : str=512 , UpperCAmelCase__ : Union[str, Any]=768 , UpperCAmelCase__ : Optional[Any]=12 , UpperCAmelCase__ : Any=12 , UpperCAmelCase__ : Optional[Any]="gelu" , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : List[str]=1e-5 , UpperCAmelCase__ : int=0.02 , UpperCAmelCase__ : Any="cls_index" , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Optional[Any]=0.1 , **UpperCAmelCase__ : Dict , ) ->Any: '''simple docstring''' A__ = vocab_size A__ = n_positions A__ = n_embd A__ = n_layer A__ = n_head A__ = afn A__ = resid_pdrop A__ = embd_pdrop A__ = attn_pdrop A__ = layer_norm_epsilon A__ = initializer_range A__ = summary_type A__ = summary_use_proj A__ = summary_activation A__ = summary_first_dropout A__ = summary_proj_to_labels super().__init__(**UpperCAmelCase__)
87
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _lowerCamelCase : Any = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : int = ["""XLNetTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : int = ["""XLNetTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Union[str, Any] = [ """XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """XLNetForMultipleChoice""", """XLNetForQuestionAnswering""", """XLNetForQuestionAnsweringSimple""", """XLNetForSequenceClassification""", """XLNetForTokenClassification""", """XLNetLMHeadModel""", """XLNetModel""", """XLNetPreTrainedModel""", """load_tf_weights_in_xlnet""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Dict = [ """TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFXLNetForMultipleChoice""", """TFXLNetForQuestionAnsweringSimple""", """TFXLNetForSequenceClassification""", """TFXLNetForTokenClassification""", """TFXLNetLMHeadModel""", """TFXLNetMainLayer""", """TFXLNetModel""", """TFXLNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet import XLNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet_fast import XLNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlnet import ( XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, XLNetForMultipleChoice, XLNetForQuestionAnswering, XLNetForQuestionAnsweringSimple, XLNetForSequenceClassification, XLNetForTokenClassification, XLNetLMHeadModel, XLNetModel, XLNetPreTrainedModel, load_tf_weights_in_xlnet, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlnet import ( TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLNetForMultipleChoice, TFXLNetForQuestionAnsweringSimple, TFXLNetForSequenceClassification, TFXLNetForTokenClassification, TFXLNetLMHeadModel, TFXLNetMainLayer, TFXLNetModel, TFXLNetPreTrainedModel, ) else: import sys _lowerCamelCase : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
87
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> int: """simple docstring""" return int((input_a, input_a).count(1 ) != 0 ) def SCREAMING_SNAKE_CASE ( ) -> None: """simple docstring""" assert or_gate(0 , 0 ) == 0 assert or_gate(0 , 1 ) == 1 assert or_gate(1 , 0 ) == 1 assert or_gate(1 , 1 ) == 1 if __name__ == "__main__": print(or_gate(0, 1)) print(or_gate(1, 0)) print(or_gate(0, 0)) print(or_gate(1, 1))
87
1
from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block @dataclass class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = 42 class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ): '''simple docstring''' @register_to_config def __init__( self : int , UpperCAmelCase__ : int = 65_536 , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : str = "fourier" , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : float = 0.0 , UpperCAmelCase__ : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , UpperCAmelCase__ : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , UpperCAmelCase__ : Tuple[str] = "UNetMidBlock1D" , UpperCAmelCase__ : str = None , UpperCAmelCase__ : Tuple[int] = (32, 32, 64) , UpperCAmelCase__ : str = None , UpperCAmelCase__ : int = 8 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : bool = False , ) ->Tuple: '''simple docstring''' super().__init__() A__ = sample_size # time if time_embedding_type == "fourier": A__ = GaussianFourierProjection( embedding_size=8 , set_W_to_weight=UpperCAmelCase__ , log=UpperCAmelCase__ , flip_sin_to_cos=UpperCAmelCase__) A__ = 2 * block_out_channels[0] elif time_embedding_type == "positional": A__ = Timesteps( block_out_channels[0] , flip_sin_to_cos=UpperCAmelCase__ , downscale_freq_shift=UpperCAmelCase__) A__ = block_out_channels[0] if use_timestep_embedding: A__ = block_out_channels[0] * 4 A__ = TimestepEmbedding( in_channels=UpperCAmelCase__ , time_embed_dim=UpperCAmelCase__ , act_fn=UpperCAmelCase__ , out_dim=block_out_channels[0] , ) A__ = nn.ModuleList([]) A__ = None A__ = nn.ModuleList([]) A__ = None # down A__ = in_channels for i, down_block_type in enumerate(UpperCAmelCase__): A__ = output_channel A__ = block_out_channels[i] if i == 0: input_channel += extra_in_channels A__ = i == len(UpperCAmelCase__) - 1 A__ = get_down_block( UpperCAmelCase__ , num_layers=UpperCAmelCase__ , in_channels=UpperCAmelCase__ , out_channels=UpperCAmelCase__ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , ) self.down_blocks.append(UpperCAmelCase__) # mid A__ = get_mid_block( UpperCAmelCase__ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=UpperCAmelCase__ , add_downsample=UpperCAmelCase__ , ) # up A__ = list(reversed(UpperCAmelCase__)) A__ = reversed_block_out_channels[0] if out_block_type is None: A__ = out_channels else: A__ = block_out_channels[0] for i, up_block_type in enumerate(UpperCAmelCase__): A__ = output_channel A__ = ( reversed_block_out_channels[i + 1] if i < len(UpperCAmelCase__) - 1 else final_upsample_channels ) A__ = i == len(UpperCAmelCase__) - 1 A__ = get_up_block( UpperCAmelCase__ , num_layers=UpperCAmelCase__ , in_channels=UpperCAmelCase__ , out_channels=UpperCAmelCase__ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , ) self.up_blocks.append(UpperCAmelCase__) A__ = output_channel # out A__ = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32) A__ = get_out_block( out_block_type=UpperCAmelCase__ , num_groups_out=UpperCAmelCase__ , embed_dim=block_out_channels[0] , out_channels=UpperCAmelCase__ , act_fn=UpperCAmelCase__ , fc_dim=block_out_channels[-1] // 4 , ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : torch.FloatTensor , UpperCAmelCase__ : Union[torch.Tensor, float, int] , UpperCAmelCase__ : bool = True , ) ->Union[UNetaDOutput, Tuple]: '''simple docstring''' A__ = timestep if not torch.is_tensor(UpperCAmelCase__): A__ = torch.tensor([timesteps] , dtype=torch.long , device=sample.device) elif torch.is_tensor(UpperCAmelCase__) and len(timesteps.shape) == 0: A__ = timesteps[None].to(sample.device) A__ = self.time_proj(UpperCAmelCase__) if self.config.use_timestep_embedding: A__ = self.time_mlp(UpperCAmelCase__) else: A__ = timestep_embed[..., None] A__ = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype) A__ = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:])) # 2. down A__ = () for downsample_block in self.down_blocks: A__ , A__ = downsample_block(hidden_states=UpperCAmelCase__ , temb=UpperCAmelCase__) down_block_res_samples += res_samples # 3. mid if self.mid_block: A__ = self.mid_block(UpperCAmelCase__ , UpperCAmelCase__) # 4. up for i, upsample_block in enumerate(self.up_blocks): A__ = down_block_res_samples[-1:] A__ = down_block_res_samples[:-1] A__ = upsample_block(UpperCAmelCase__ , res_hidden_states_tuple=UpperCAmelCase__ , temb=UpperCAmelCase__) # 5. post-process if self.out_block: A__ = self.out_block(UpperCAmelCase__ , UpperCAmelCase__) if not return_dict: return (sample,) return UNetaDOutput(sample=UpperCAmelCase__)
87
import importlib.metadata import warnings from copy import deepcopy from packaging import version from ..utils import logging from .import_utils import is_accelerate_available, is_bitsandbytes_available if is_bitsandbytes_available(): import bitsandbytes as bnb import torch import torch.nn as nn from ..pytorch_utils import ConvaD if is_accelerate_available(): from accelerate import init_empty_weights from accelerate.utils import find_tied_parameters _lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_=None , lowercase_=None ) -> Dict: """simple docstring""" if "." in tensor_name: A__ = tensor_name.split('''.''' ) for split in splits[:-1]: A__ = getattr(lowercase_ , lowercase_ ) if new_module is None: raise ValueError(f"""{module} has no attribute {split}.""" ) A__ = new_module A__ = splits[-1] if tensor_name not in module._parameters and tensor_name not in module._buffers: raise ValueError(f"""{module} does not have a parameter or a buffer named {tensor_name}.""" ) A__ = tensor_name in module._buffers A__ = getattr(lowercase_ , lowercase_ ) if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None: raise ValueError(f"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" ) A__ = False A__ = False if is_buffer or not is_bitsandbytes_available(): A__ = False A__ = False else: A__ = hasattr(bnb.nn , '''Params4bit''' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit ) A__ = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams ) if is_abit or is_abit: A__ = module._parameters[tensor_name] if param.device.type != "cuda": if value is None: A__ = old_value.to(lowercase_ ) elif isinstance(lowercase_ , torch.Tensor ): A__ = value.to('''cpu''' ) if value.dtype == torch.inta: A__ = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse( '''0.37.2''' ) if not is_abit_serializable: raise ValueError( '''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. ''' '''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' ) else: A__ = torch.tensor(lowercase_ , device='''cpu''' ) # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. if issubclass(module.source_cls , lowercase_ ) and fpaa_statistics is None: A__ = new_value.T A__ = old_value.__dict__ if is_abit: A__ = bnb.nn.IntaParams(lowercase_ , requires_grad=lowercase_ , **lowercase_ ).to(lowercase_ ) elif is_abit: A__ = bnb.nn.Paramsabit(lowercase_ , requires_grad=lowercase_ , **lowercase_ ).to(lowercase_ ) A__ = new_value if fpaa_statistics is not None: setattr(module.weight , '''SCB''' , fpaa_statistics.to(lowercase_ ) ) else: if value is None: A__ = old_value.to(lowercase_ ) elif isinstance(lowercase_ , torch.Tensor ): A__ = value.to(lowercase_ ) else: A__ = torch.tensor(lowercase_ , device=lowercase_ ) if is_buffer: A__ = new_value else: A__ = nn.Parameter(lowercase_ , requires_grad=old_value.requires_grad ) A__ = new_value def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=False ) -> Dict: """simple docstring""" for name, module in model.named_children(): if current_key_name is None: A__ = [] current_key_name.append(lowercase_ ) if (isinstance(lowercase_ , nn.Linear ) or isinstance(lowercase_ , lowercase_ )) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` if not any(key in '''.'''.join(lowercase_ ) for key in modules_to_not_convert ): with init_empty_weights(): if isinstance(lowercase_ , lowercase_ ): A__ , A__ = module.weight.shape else: A__ = module.in_features A__ = module.out_features if quantization_config.quantization_method() == "llm_int8": A__ = bnb.nn.LinearabitLt( lowercase_ , lowercase_ , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , ) A__ = True else: if ( quantization_config.llm_inta_skip_modules is not None and name in quantization_config.llm_inta_skip_modules ): pass else: A__ = bnb.nn.Linearabit( lowercase_ , lowercase_ , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , ) A__ = True # Store the module class in case we need to transpose the weight later A__ = type(lowercase_ ) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(lowercase_ ) if len(list(module.children() ) ) > 0: A__ , A__ = _replace_with_bnb_linear( lowercase_ , lowercase_ , lowercase_ , lowercase_ , has_been_replaced=lowercase_ , ) # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None ) -> Tuple: """simple docstring""" A__ = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert A__ , A__ = _replace_with_bnb_linear( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) if not has_been_replaced: logger.warning( '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.''' ''' Please double check your model architecture, or submit an issue on github if you think this is''' ''' a bug.''' ) return model def SCREAMING_SNAKE_CASE ( *lowercase_ , **lowercase_ ) -> Dict: """simple docstring""" warnings.warn( '''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' , lowercase_ , ) return replace_with_bnb_linear(*lowercase_ , **lowercase_ ) def SCREAMING_SNAKE_CASE ( *lowercase_ , **lowercase_ ) -> Optional[Any]: """simple docstring""" warnings.warn( '''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' , lowercase_ , ) return set_module_quantized_tensor_to_device(*lowercase_ , **lowercase_ ) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]: """simple docstring""" A__ = deepcopy(lowercase_ ) # this has 0 cost since it is done inside `init_empty_weights` context manager` tied_model.tie_weights() A__ = find_tied_parameters(lowercase_ ) # For compatibility with Accelerate < 0.18 if isinstance(lowercase_ , lowercase_ ): A__ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: A__ = sum(lowercase_ , [] ) A__ = len(lowercase_ ) > 0 # Check if it is a base model A__ = not hasattr(lowercase_ , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head A__ = list(model.named_children() ) A__ = [list_modules[-1][0]] # add last module together with tied weights A__ = set(lowercase_ ) - set(lowercase_ ) A__ = list(set(lowercase_ ) ) + list(lowercase_ ) # remove ".weight" from the keys A__ = ['''.weight''', '''.bias'''] A__ = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: A__ = name.replace(lowercase_ , '''''' ) filtered_module_names.append(lowercase_ ) return filtered_module_names
87
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available _lowerCamelCase : Union[str, Any] = { """configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Optional[int] = [ """LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""", """LongT5EncoderModel""", """LongT5ForConditionalGeneration""", """LongT5Model""", """LongT5PreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Optional[int] = [ """FlaxLongT5ForConditionalGeneration""", """FlaxLongT5Model""", """FlaxLongT5PreTrainedModel""", ] if TYPE_CHECKING: from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longta import ( LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST, LongTaEncoderModel, LongTaForConditionalGeneration, LongTaModel, LongTaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_longta import ( FlaxLongTaForConditionalGeneration, FlaxLongTaModel, FlaxLongTaPreTrainedModel, ) else: import sys _lowerCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
87
from math import sqrt import numpy as np from sympy import symbols # Coefficient # Speed of light (m/s) _lowerCamelCase : str = 299792458 # Symbols _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = symbols("""ct x y z""") def SCREAMING_SNAKE_CASE ( lowercase_ ) -> float: """simple docstring""" if velocity > c: raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''' ) elif velocity < 1: # Usually the speed should be much higher than 1 (c order of magnitude) raise ValueError('''Speed must be greater than or equal to 1!''' ) return velocity / c def SCREAMING_SNAKE_CASE ( lowercase_ ) -> float: """simple docstring""" return 1 / sqrt(1 - beta(lowercase_ ) ** 2 ) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> np.ndarray: """simple docstring""" return np.array( [ [gamma(lowercase_ ), -gamma(lowercase_ ) * beta(lowercase_ ), 0, 0], [-gamma(lowercase_ ) * beta(lowercase_ ), gamma(lowercase_ ), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], ] ) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ = None ) -> np.ndarray: """simple docstring""" if event is None: A__ = np.array([ct, x, y, z] ) # Symbolic four vector else: event[0] *= c # x0 is ct (speed of light * time) return transformation_matrix(lowercase_ ) @ event if __name__ == "__main__": import doctest doctest.testmod() # Example of symbolic vector: _lowerCamelCase : Tuple = transform(29979245) print("""Example of four vector: """) print(F'''ct\' = {four_vector[0]}''') print(F'''x\' = {four_vector[1]}''') print(F'''y\' = {four_vector[2]}''') print(F'''z\' = {four_vector[3]}''') # Substitute symbols with numerical values _lowerCamelCase : int = {ct: c, x: 1, y: 1, z: 1} _lowerCamelCase : Any = [four_vector[i].subs(sub_dict) for i in range(4)] print(F'''\n{numerical_vector}''')
87
1
import gc import unittest import numpy as np import torch import torch.nn.functional as F from transformers import ( ClapTextConfig, ClapTextModelWithProjection, RobertaTokenizer, SpeechTaHifiGan, SpeechTaHifiGanConfig, ) from diffusers import ( AudioLDMPipeline, AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class UpperCamelCase_ ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = AudioLDMPipeline UpperCAmelCase__ = TEXT_TO_AUDIO_PARAMS UpperCAmelCase__ = TEXT_TO_AUDIO_BATCH_PARAMS UpperCAmelCase__ = frozenset( [ '''num_inference_steps''', '''num_waveforms_per_prompt''', '''generator''', '''latents''', '''output_type''', '''return_dict''', '''callback''', '''callback_steps''', ] ) def SCREAMING_SNAKE_CASE ( self : int) ->List[str]: '''simple docstring''' torch.manual_seed(0) A__ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=(32, 64) , class_embed_type='''simple_projection''' , projection_class_embeddings_input_dim=32 , class_embeddings_concat=UpperCAmelCase__ , ) A__ = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=UpperCAmelCase__ , set_alpha_to_one=UpperCAmelCase__ , ) torch.manual_seed(0) A__ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0) A__ = ClapTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , projection_dim=32 , ) A__ = ClapTextModelWithProjection(UpperCAmelCase__) A__ = RobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-roberta''' , model_max_length=77) A__ = SpeechTaHifiGanConfig( model_in_dim=8 , sampling_rate=16_000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=UpperCAmelCase__ , ) A__ = SpeechTaHifiGan(UpperCAmelCase__) A__ = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''vocoder''': vocoder, } return components def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any=0) ->List[str]: '''simple docstring''' if str(UpperCAmelCase__).startswith('''mps'''): A__ = torch.manual_seed(UpperCAmelCase__) else: A__ = torch.Generator(device=UpperCAmelCase__).manual_seed(UpperCAmelCase__) A__ = { '''prompt''': '''A hammer hitting a wooden surface''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, } return inputs def SCREAMING_SNAKE_CASE ( self : Tuple) ->List[Any]: '''simple docstring''' A__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator A__ = self.get_dummy_components() A__ = AudioLDMPipeline(**UpperCAmelCase__) A__ = audioldm_pipe.to(UpperCAmelCase__) audioldm_pipe.set_progress_bar_config(disable=UpperCAmelCase__) A__ = self.get_dummy_inputs(UpperCAmelCase__) A__ = audioldm_pipe(**UpperCAmelCase__) A__ = output.audios[0] assert audio.ndim == 1 assert len(UpperCAmelCase__) == 256 A__ = audio[:10] A__ = np.array( [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]) assert np.abs(audio_slice - expected_slice).max() < 1e-2 def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[Any]: '''simple docstring''' A__ = self.get_dummy_components() A__ = AudioLDMPipeline(**UpperCAmelCase__) A__ = audioldm_pipe.to(UpperCAmelCase__) A__ = audioldm_pipe.to(UpperCAmelCase__) audioldm_pipe.set_progress_bar_config(disable=UpperCAmelCase__) A__ = self.get_dummy_inputs(UpperCAmelCase__) A__ = 3 * [inputs['''prompt''']] # forward A__ = audioldm_pipe(**UpperCAmelCase__) A__ = output.audios[0] A__ = self.get_dummy_inputs(UpperCAmelCase__) A__ = 3 * [inputs.pop('''prompt''')] A__ = audioldm_pipe.tokenizer( UpperCAmelCase__ , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=UpperCAmelCase__ , return_tensors='''pt''' , ) A__ = text_inputs['''input_ids'''].to(UpperCAmelCase__) A__ = audioldm_pipe.text_encoder( UpperCAmelCase__ , ) A__ = prompt_embeds.text_embeds # additional L_2 normalization over each hidden-state A__ = F.normalize(UpperCAmelCase__ , dim=-1) A__ = prompt_embeds # forward A__ = audioldm_pipe(**UpperCAmelCase__) A__ = output.audios[0] assert np.abs(audio_a - audio_a).max() < 1e-2 def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any: '''simple docstring''' A__ = self.get_dummy_components() A__ = AudioLDMPipeline(**UpperCAmelCase__) A__ = audioldm_pipe.to(UpperCAmelCase__) A__ = audioldm_pipe.to(UpperCAmelCase__) audioldm_pipe.set_progress_bar_config(disable=UpperCAmelCase__) A__ = self.get_dummy_inputs(UpperCAmelCase__) A__ = 3 * ['''this is a negative prompt'''] A__ = negative_prompt A__ = 3 * [inputs['''prompt''']] # forward A__ = audioldm_pipe(**UpperCAmelCase__) A__ = output.audios[0] A__ = self.get_dummy_inputs(UpperCAmelCase__) A__ = 3 * [inputs.pop('''prompt''')] A__ = [] for p in [prompt, negative_prompt]: A__ = audioldm_pipe.tokenizer( UpperCAmelCase__ , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=UpperCAmelCase__ , return_tensors='''pt''' , ) A__ = text_inputs['''input_ids'''].to(UpperCAmelCase__) A__ = audioldm_pipe.text_encoder( UpperCAmelCase__ , ) A__ = text_embeds.text_embeds # additional L_2 normalization over each hidden-state A__ = F.normalize(UpperCAmelCase__ , dim=-1) embeds.append(UpperCAmelCase__) A__ , A__ = embeds # forward A__ = audioldm_pipe(**UpperCAmelCase__) A__ = output.audios[0] assert np.abs(audio_a - audio_a).max() < 1e-2 def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Tuple: '''simple docstring''' A__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator A__ = self.get_dummy_components() A__ = PNDMScheduler(skip_prk_steps=UpperCAmelCase__) A__ = AudioLDMPipeline(**UpperCAmelCase__) A__ = audioldm_pipe.to(UpperCAmelCase__) audioldm_pipe.set_progress_bar_config(disable=UpperCAmelCase__) A__ = self.get_dummy_inputs(UpperCAmelCase__) A__ = '''egg cracking''' A__ = audioldm_pipe(**UpperCAmelCase__ , negative_prompt=UpperCAmelCase__) A__ = output.audios[0] assert audio.ndim == 1 assert len(UpperCAmelCase__) == 256 A__ = audio[:10] A__ = np.array( [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]) assert np.abs(audio_slice - expected_slice).max() < 1e-2 def SCREAMING_SNAKE_CASE ( self : List[str]) ->Tuple: '''simple docstring''' A__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator A__ = self.get_dummy_components() A__ = PNDMScheduler(skip_prk_steps=UpperCAmelCase__) A__ = AudioLDMPipeline(**UpperCAmelCase__) A__ = audioldm_pipe.to(UpperCAmelCase__) audioldm_pipe.set_progress_bar_config(disable=UpperCAmelCase__) A__ = '''A hammer hitting a wooden surface''' # test num_waveforms_per_prompt=1 (default) A__ = audioldm_pipe(UpperCAmelCase__ , num_inference_steps=2).audios assert audios.shape == (1, 256) # test num_waveforms_per_prompt=1 (default) for batch of prompts A__ = 2 A__ = audioldm_pipe([prompt] * batch_size , num_inference_steps=2).audios assert audios.shape == (batch_size, 256) # test num_waveforms_per_prompt for single prompt A__ = 2 A__ = audioldm_pipe(UpperCAmelCase__ , num_inference_steps=2 , num_waveforms_per_prompt=UpperCAmelCase__).audios assert audios.shape == (num_waveforms_per_prompt, 256) # test num_waveforms_per_prompt for batch of prompts A__ = 2 A__ = audioldm_pipe( [prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=UpperCAmelCase__).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 256) def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Tuple: '''simple docstring''' A__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator A__ = self.get_dummy_components() A__ = AudioLDMPipeline(**UpperCAmelCase__) A__ = audioldm_pipe.to(UpperCAmelCase__) audioldm_pipe.set_progress_bar_config(disable=UpperCAmelCase__) A__ = audioldm_pipe.vocoder.config.sampling_rate A__ = self.get_dummy_inputs(UpperCAmelCase__) A__ = audioldm_pipe(audio_length_in_s=0.016 , **UpperCAmelCase__) A__ = output.audios[0] assert audio.ndim == 1 assert len(UpperCAmelCase__) / vocoder_sampling_rate == 0.016 A__ = audioldm_pipe(audio_length_in_s=0.032 , **UpperCAmelCase__) A__ = output.audios[0] assert audio.ndim == 1 assert len(UpperCAmelCase__) / vocoder_sampling_rate == 0.032 def SCREAMING_SNAKE_CASE ( self : str) ->Dict: '''simple docstring''' A__ = self.get_dummy_components() A__ = AudioLDMPipeline(**UpperCAmelCase__) A__ = audioldm_pipe.to(UpperCAmelCase__) audioldm_pipe.set_progress_bar_config(disable=UpperCAmelCase__) A__ = ['''hey'''] A__ = audioldm_pipe(UpperCAmelCase__ , num_inference_steps=1) A__ = output.audios.shape assert audio_shape == (1, 256) A__ = audioldm_pipe.vocoder.config config.model_in_dim *= 2 A__ = SpeechTaHifiGan(UpperCAmelCase__).to(UpperCAmelCase__) A__ = audioldm_pipe(UpperCAmelCase__ , num_inference_steps=1) A__ = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 256) def SCREAMING_SNAKE_CASE ( self : List[Any]) ->int: '''simple docstring''' self._test_attention_slicing_forward_pass(test_mean_pixel_difference=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[str]: '''simple docstring''' self._test_inference_batch_single_identical(test_mean_pixel_difference=UpperCAmelCase__) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[int]: '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=UpperCAmelCase__) @slow class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE ( self : int) ->int: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int="cpu" , UpperCAmelCase__ : List[Any]=torch.floataa , UpperCAmelCase__ : str=0) ->str: '''simple docstring''' A__ = torch.Generator(device=UpperCAmelCase__).manual_seed(UpperCAmelCase__) A__ = np.random.RandomState(UpperCAmelCase__).standard_normal((1, 8, 128, 16)) A__ = torch.from_numpy(UpperCAmelCase__).to(device=UpperCAmelCase__ , dtype=UpperCAmelCase__) A__ = { '''prompt''': '''A hammer hitting a wooden surface''', '''latents''': latents, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 2.5, } return inputs def SCREAMING_SNAKE_CASE ( self : Dict) ->Union[str, Any]: '''simple docstring''' A__ = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''') A__ = audioldm_pipe.to(UpperCAmelCase__) audioldm_pipe.set_progress_bar_config(disable=UpperCAmelCase__) A__ = self.get_inputs(UpperCAmelCase__) A__ = 25 A__ = audioldm_pipe(**UpperCAmelCase__).audios[0] assert audio.ndim == 1 assert len(UpperCAmelCase__) == 81_920 A__ = audio[77_230:77_240] A__ = np.array( [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]) A__ = np.abs(expected_slice - audio_slice).max() assert max_diff < 1e-2 def SCREAMING_SNAKE_CASE ( self : int) ->Optional[Any]: '''simple docstring''' A__ = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''') A__ = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config) A__ = audioldm_pipe.to(UpperCAmelCase__) audioldm_pipe.set_progress_bar_config(disable=UpperCAmelCase__) A__ = self.get_inputs(UpperCAmelCase__) A__ = audioldm_pipe(**UpperCAmelCase__).audios[0] assert audio.ndim == 1 assert len(UpperCAmelCase__) == 81_920 A__ = audio[27_780:27_790] A__ = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212]) A__ = np.abs(expected_slice - audio_slice).max() assert max_diff < 3e-2
87
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> list: """simple docstring""" if len(lowercase_ ) <= 1: return [tuple(lowercase_ )] A__ = [] def generate(lowercase_ , lowercase_ ): if k == 1: res.append(tuple(arr[:] ) ) return generate(k - 1 , lowercase_ ) for i in range(k - 1 ): if k % 2 == 0: # k is even A__ , A__ = arr[k - 1], arr[i] else: # k is odd A__ , A__ = arr[k - 1], arr[0] generate(k - 1 , lowercase_ ) generate(len(lowercase_ ) , lowercase_ ) return res if __name__ == "__main__": _lowerCamelCase : int = input("""Enter numbers separated by a comma:\n""").strip() _lowerCamelCase : str = [int(item) for item in user_input.split(""",""")] print(heaps(arr))
87
1
import unittest from .lib import ( Matrix, Vector, axpy, square_zero_matrix, unit_basis_vector, zero_vector, ) class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE ( self : int) ->None: '''simple docstring''' A__ = Vector([1, 2, 3]) self.assertEqual(x.component(0) , 1) self.assertEqual(x.component(2) , 3) A__ = Vector() def SCREAMING_SNAKE_CASE ( self : int) ->None: '''simple docstring''' A__ = Vector([0, 0, 0, 0, 0, 1]) self.assertEqual(str(UpperCAmelCase__) , '''(0,0,0,0,0,1)''') def SCREAMING_SNAKE_CASE ( self : Dict) ->None: '''simple docstring''' A__ = Vector([1, 2, 3, 4]) self.assertEqual(len(UpperCAmelCase__) , 4) def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->None: '''simple docstring''' A__ = Vector([1, 2]) A__ = Vector([1, 2, 3, 4, 5]) A__ = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) A__ = Vector([1, -1, 1, -1, 2, -3, 4, -5]) self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3) self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3) self.assertEqual(z.euclidean_length() , 0) self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3) def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->None: '''simple docstring''' A__ = Vector([1, 2, 3]) A__ = Vector([1, 1, 1]) self.assertEqual((x + y).component(0) , 2) self.assertEqual((x + y).component(1) , 3) self.assertEqual((x + y).component(2) , 4) def SCREAMING_SNAKE_CASE ( self : Any) ->None: '''simple docstring''' A__ = Vector([1, 2, 3]) A__ = Vector([1, 1, 1]) self.assertEqual((x - y).component(0) , 0) self.assertEqual((x - y).component(1) , 1) self.assertEqual((x - y).component(2) , 2) def SCREAMING_SNAKE_CASE ( self : int) ->None: '''simple docstring''' A__ = Vector([1, 2, 3]) A__ = Vector([2, -1, 4]) # for test of dot product A__ = Vector([1, -2, -1]) self.assertEqual(str(x * 3.0) , '''(3.0,6.0,9.0)''') self.assertEqual((a * b) , 0) def SCREAMING_SNAKE_CASE ( self : Dict) ->None: '''simple docstring''' self.assertEqual(str(zero_vector(10)).count('''0''') , 10) def SCREAMING_SNAKE_CASE ( self : Any) ->None: '''simple docstring''' self.assertEqual(str(unit_basis_vector(3 , 1)) , '''(0,1,0)''') def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->None: '''simple docstring''' A__ = Vector([1, 2, 3]) A__ = Vector([1, 0, 1]) self.assertEqual(str(axpy(2 , UpperCAmelCase__ , UpperCAmelCase__)) , '''(3,4,7)''') def SCREAMING_SNAKE_CASE ( self : Any) ->None: '''simple docstring''' A__ = Vector([1, 0, 0, 0, 0, 0]) A__ = x.copy() self.assertEqual(str(UpperCAmelCase__) , str(UpperCAmelCase__)) def SCREAMING_SNAKE_CASE ( self : str) ->None: '''simple docstring''' A__ = Vector([1, 0, 0]) x.change_component(0 , 0) x.change_component(1 , 1) self.assertEqual(str(UpperCAmelCase__) , '''(0,1,0)''') def SCREAMING_SNAKE_CASE ( self : Tuple) ->None: '''simple docstring''' A__ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3) self.assertEqual('''|1,2,3|\n|2,4,5|\n|6,7,8|\n''' , str(UpperCAmelCase__)) def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->None: '''simple docstring''' A__ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3) A__ = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]] for x in range(a.height()): for y in range(a.width()): self.assertEqual(minors[x][y] , a.minor(UpperCAmelCase__ , UpperCAmelCase__)) def SCREAMING_SNAKE_CASE ( self : List[str]) ->None: '''simple docstring''' A__ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3) A__ = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]] for x in range(a.height()): for y in range(a.width()): self.assertEqual(cofactors[x][y] , a.cofactor(UpperCAmelCase__ , UpperCAmelCase__)) def SCREAMING_SNAKE_CASE ( self : Any) ->None: '''simple docstring''' A__ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3) self.assertEqual(-5 , a.determinant()) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->None: '''simple docstring''' A__ = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3) A__ = Vector([1, 2, 3]) self.assertEqual('''(14,32,50)''' , str(a * x)) self.assertEqual('''|2,4,6|\n|8,10,12|\n|14,16,18|\n''' , str(a * 2)) def SCREAMING_SNAKE_CASE ( self : int) ->None: '''simple docstring''' A__ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3) a.change_component(0 , 2 , 5) self.assertEqual('''|1,2,5|\n|2,4,5|\n|6,7,8|\n''' , str(UpperCAmelCase__)) def SCREAMING_SNAKE_CASE ( self : List[str]) ->None: '''simple docstring''' A__ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3) self.assertEqual(7 , a.component(2 , 1) , 0.01) def SCREAMING_SNAKE_CASE ( self : List[Any]) ->None: '''simple docstring''' A__ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3) A__ = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3) self.assertEqual('''|2,4,10|\n|4,8,10|\n|12,14,18|\n''' , str(a + b)) def SCREAMING_SNAKE_CASE ( self : Any) ->None: '''simple docstring''' A__ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3) A__ = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3) self.assertEqual('''|0,0,-4|\n|0,0,0|\n|0,0,-2|\n''' , str(a - b)) def SCREAMING_SNAKE_CASE ( self : Any) ->None: '''simple docstring''' self.assertEqual( '''|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n''' , str(square_zero_matrix(5)) , ) if __name__ == "__main__": unittest.main()
87
import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def SCREAMING_SNAKE_CASE ( lowercase_ ) -> int: """simple docstring""" return 1.0 / (1.0 + np.exp(-_outputs )) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Union[str, Any]: """simple docstring""" A__ = np.max(_outputs , axis=-1 , keepdims=lowercase_ ) A__ = np.exp(_outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=lowercase_ ) class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''sigmoid''' UpperCAmelCase__ = '''softmax''' UpperCAmelCase__ = '''none''' @add_end_docstrings( UpperCAmelCase__ , R''' return_all_scores (`bool`, *optional*, defaults to `False`): Whether to return all prediction scores or just the one of the predicted class. function_to_apply (`str`, *optional*, defaults to `"default"`): The function to apply to the model outputs in order to retrieve the scores. Accepts four different values: - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model has several labels, will apply the softmax function on the output. - `"sigmoid"`: Applies the sigmoid function on the output. - `"softmax"`: Applies the softmax function on the output. - `"none"`: Does not apply any function on the output. ''' , ) class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = False UpperCAmelCase__ = ClassificationFunction.NONE def __init__( self : Any , **UpperCAmelCase__ : Optional[Any]) ->str: '''simple docstring''' super().__init__(**UpperCAmelCase__) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == '''tf''' else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING) def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : int="" , **UpperCAmelCase__ : Any) ->int: '''simple docstring''' A__ = tokenizer_kwargs A__ = {} if hasattr(self.model.config , '''return_all_scores''') and return_all_scores is None: A__ = self.model.config.return_all_scores if isinstance(UpperCAmelCase__ , UpperCAmelCase__) or top_k is None: A__ = top_k A__ = False elif return_all_scores is not None: warnings.warn( '''`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of''' ''' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.''' , UpperCAmelCase__ , ) if return_all_scores: A__ = None else: A__ = 1 if isinstance(UpperCAmelCase__ , UpperCAmelCase__): A__ = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: A__ = function_to_apply return preprocess_params, {}, postprocess_params def __call__( self : str , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Optional[int]) ->Union[str, Any]: '''simple docstring''' A__ = super().__call__(*UpperCAmelCase__ , **UpperCAmelCase__) # TODO try and retrieve it in a nicer way from _sanitize_parameters. A__ = '''top_k''' not in kwargs if isinstance(args[0] , UpperCAmelCase__) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : Any , **UpperCAmelCase__ : str) ->Dict[str, GenericTensor]: '''simple docstring''' A__ = self.framework if isinstance(UpperCAmelCase__ , UpperCAmelCase__): return self.tokenizer(**UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__) elif isinstance(UpperCAmelCase__ , UpperCAmelCase__) and len(UpperCAmelCase__) == 1 and isinstance(inputs[0] , UpperCAmelCase__) and len(inputs[0]) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__) elif isinstance(UpperCAmelCase__ , UpperCAmelCase__): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( '''The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a''' ''' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.''') return self.tokenizer(UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : Tuple) ->Tuple: '''simple docstring''' return self.model(**UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : List[Any]=1 , UpperCAmelCase__ : str=True) ->Dict: '''simple docstring''' if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: A__ = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: A__ = ClassificationFunction.SOFTMAX elif hasattr(self.model.config , '''function_to_apply''') and function_to_apply is None: A__ = self.model.config.function_to_apply else: A__ = ClassificationFunction.NONE A__ = model_outputs['''logits'''][0] A__ = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: A__ = sigmoid(UpperCAmelCase__) elif function_to_apply == ClassificationFunction.SOFTMAX: A__ = softmax(UpperCAmelCase__) elif function_to_apply == ClassificationFunction.NONE: A__ = outputs else: raise ValueError(f"""Unrecognized `function_to_apply` argument: {function_to_apply}""") if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} A__ = [ {'''label''': self.model.config.idalabel[i], '''score''': score.item()} for i, score in enumerate(UpperCAmelCase__) ] if not _legacy: dict_scores.sort(key=lambda UpperCAmelCase__: x["score"] , reverse=UpperCAmelCase__) if top_k is not None: A__ = dict_scores[:top_k] return dict_scores
87
1
from math import factorial def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> float: """simple docstring""" if successes > trials: raise ValueError('''successes must be lower or equal to trials''' ) if trials < 0 or successes < 0: raise ValueError('''the function is defined for non-negative integers''' ) if not isinstance(lowercase_ , lowercase_ ) or not isinstance(lowercase_ , lowercase_ ): raise ValueError('''the function is defined for non-negative integers''' ) if not 0 < prob < 1: raise ValueError('''prob has to be in range of 1 - 0''' ) A__ = (prob**successes) * ((1 - prob) ** (trials - successes)) # Calculate the binomial coefficient: n! / k!(n-k)! A__ = float(factorial(lowercase_ ) ) coefficient /= factorial(lowercase_ ) * factorial(trials - successes ) return probability * coefficient if __name__ == "__main__": from doctest import testmod testmod() print("""Probability of 2 successes out of 4 trails""") print("""with probability of 0.75 is:""", end=""" """) print(binomial_distribution(2, 4, 0.75))
87
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _lowerCamelCase : Any = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : int = ["""XLNetTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : int = ["""XLNetTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Union[str, Any] = [ """XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """XLNetForMultipleChoice""", """XLNetForQuestionAnswering""", """XLNetForQuestionAnsweringSimple""", """XLNetForSequenceClassification""", """XLNetForTokenClassification""", """XLNetLMHeadModel""", """XLNetModel""", """XLNetPreTrainedModel""", """load_tf_weights_in_xlnet""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Dict = [ """TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFXLNetForMultipleChoice""", """TFXLNetForQuestionAnsweringSimple""", """TFXLNetForSequenceClassification""", """TFXLNetForTokenClassification""", """TFXLNetLMHeadModel""", """TFXLNetMainLayer""", """TFXLNetModel""", """TFXLNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet import XLNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet_fast import XLNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlnet import ( XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, XLNetForMultipleChoice, XLNetForQuestionAnswering, XLNetForQuestionAnsweringSimple, XLNetForSequenceClassification, XLNetForTokenClassification, XLNetLMHeadModel, XLNetModel, XLNetPreTrainedModel, load_tf_weights_in_xlnet, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlnet import ( TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLNetForMultipleChoice, TFXLNetForQuestionAnsweringSimple, TFXLNetForSequenceClassification, TFXLNetForTokenClassification, TFXLNetLMHeadModel, TFXLNetMainLayer, TFXLNetModel, TFXLNetPreTrainedModel, ) else: import sys _lowerCamelCase : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
87
1
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import ( BitConfig, ViTHybridConfig, ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel, ) from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() _lowerCamelCase : Optional[Any] = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=False ) -> Optional[int]: """simple docstring""" A__ = [] # fmt: off # stem: rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token''') ) rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings''') ) rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight''') ) rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias''') ) # backbone rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight''') ) rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight''') ) rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias''') ) for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight""") ) rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight""") ) rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias""") ) rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight""") ) rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight""") ) rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias""") ) rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight""") ) rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight""") ) rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias""") ) rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight""") ) rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight""") ) rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias""") ) # transformer encoder for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") ) if base_model: # layernorm + pooler rename_keys.extend( [ ('''norm.weight''', '''layernorm.weight'''), ('''norm.bias''', '''layernorm.bias'''), ('''pre_logits.fc.weight''', '''pooler.dense.weight'''), ('''pre_logits.fc.bias''', '''pooler.dense.bias'''), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" A__ = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('''norm.weight''', '''vit.layernorm.weight'''), ('''norm.bias''', '''vit.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ] ) # fmt: on return rename_keys def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_=False ) -> Dict: """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: A__ = '''''' else: A__ = '''vit.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) A__ = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" ) A__ = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict A__ = in_proj_weight[ : config.hidden_size, : ] A__ = in_proj_bias[: config.hidden_size] A__ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] A__ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] A__ = in_proj_weight[ -config.hidden_size :, : ] A__ = in_proj_bias[-config.hidden_size :] def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Any: """simple docstring""" A__ = ['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> str: """simple docstring""" A__ = dct.pop(lowercase_ ) A__ = val def SCREAMING_SNAKE_CASE ( ) -> Dict: """simple docstring""" A__ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' A__ = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw ) return im @torch.no_grad() def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_=False ) -> List[Any]: """simple docstring""" A__ = BitConfig( global_padding='''same''' , layer_type='''bottleneck''' , depths=(3, 4, 9) , out_features=['''stage3'''] , embedding_dynamic_padding=lowercase_ , ) A__ = ViTHybridConfig(backbone_config=lowercase_ , image_size=384 , num_labels=1_000 ) A__ = False # load original model from timm A__ = timm.create_model(lowercase_ , pretrained=lowercase_ ) timm_model.eval() # load state_dict of original model, remove and rename some keys A__ = timm_model.state_dict() if base_model: remove_classification_head_(lowercase_ ) A__ = create_rename_keys(lowercase_ , lowercase_ ) for src, dest in rename_keys: rename_key(lowercase_ , lowercase_ , lowercase_ ) read_in_q_k_v(lowercase_ , lowercase_ , lowercase_ ) A__ = '''huggingface/label-files''' A__ = '''imagenet-1k-id2label.json''' A__ = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type='''dataset''' ) , '''r''' ) ) A__ = {int(lowercase_ ): v for k, v in idalabel.items()} A__ = idalabel A__ = {v: k for k, v in idalabel.items()} # load HuggingFace model if vit_name[-5:] == "in21k": A__ = ViTHybridModel(lowercase_ ).eval() else: A__ = ViTHybridForImageClassification(lowercase_ ).eval() model.load_state_dict(lowercase_ ) # create image processor A__ = create_transform(**resolve_data_config({} , model=lowercase_ ) ) A__ = transform.transforms A__ = { '''bilinear''': PILImageResampling.BILINEAR, '''bicubic''': PILImageResampling.BICUBIC, '''nearest''': PILImageResampling.NEAREST, } A__ = ViTHybridImageProcessor( do_resize=lowercase_ , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=lowercase_ , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=lowercase_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) A__ = prepare_img() A__ = transform(lowercase_ ).unsqueeze(0 ) A__ = processor(lowercase_ , return_tensors='''pt''' ).pixel_values # verify pixel values assert torch.allclose(lowercase_ , lowercase_ ) # verify logits with torch.no_grad(): A__ = model(lowercase_ ) A__ = outputs.logits print('''Predicted class:''' , logits.argmax(-1 ).item() ) if base_model: A__ = timm_model.forward_features(lowercase_ ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(lowercase_ , outputs.pooler_output , atol=1E-3 ) else: A__ = timm_model(lowercase_ ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(lowercase_ , outputs.logits , atol=1E-3 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: Path(lowercase_ ).mkdir(exist_ok=lowercase_ ) print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(lowercase_ ) print(f"""Saving processor to {pytorch_dump_folder_path}""" ) processor.save_pretrained(lowercase_ ) if push_to_hub: print(f"""Pushing model and processor to the hub {vit_name}""" ) model.push_to_hub(f"""ybelkada/{vit_name}""" ) processor.push_to_hub(f"""ybelkada/{vit_name}""" ) if __name__ == "__main__": _lowerCamelCase : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( """--vit_name""", default="""vit_base_r50_s16_384""", type=str, help="""Name of the hybrid ViT timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub.""" ) _lowerCamelCase : Dict = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
87
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCamelCase : Optional[Any] = logging.get_logger(__name__) _lowerCamelCase : Union[str, Any] = { """google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""", """google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""", # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 } class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''mobilenet_v1''' def __init__( self : Optional[int] , UpperCAmelCase__ : Optional[int]=3 , UpperCAmelCase__ : Optional[Any]=224 , UpperCAmelCase__ : Optional[int]=1.0 , UpperCAmelCase__ : Optional[int]=8 , UpperCAmelCase__ : Tuple="relu6" , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Dict=0.999 , UpperCAmelCase__ : str=0.02 , UpperCAmelCase__ : Optional[int]=0.001 , **UpperCAmelCase__ : Dict , ) ->List[str]: '''simple docstring''' super().__init__(**UpperCAmelCase__) if depth_multiplier <= 0: raise ValueError('''depth_multiplier must be greater than zero.''') A__ = num_channels A__ = image_size A__ = depth_multiplier A__ = min_depth A__ = hidden_act A__ = tf_padding A__ = classifier_dropout_prob A__ = initializer_range A__ = layer_norm_eps class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = version.parse('''1.11''' ) @property def SCREAMING_SNAKE_CASE ( self : Any) ->Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict([('''pixel_values''', {0: '''batch'''})]) @property def SCREAMING_SNAKE_CASE ( self : List[str]) ->Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task == "image-classification": return OrderedDict([('''logits''', {0: '''batch'''})]) else: return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})]) @property def SCREAMING_SNAKE_CASE ( self : int) ->float: '''simple docstring''' return 1e-4
87
1
from argparse import ArgumentParser, Namespace from ..utils import logging from . import BaseTransformersCLICommand def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Dict: """simple docstring""" return ConvertCommand( args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name ) _lowerCamelCase : Union[str, Any] = """ transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions. """ class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' @staticmethod def SCREAMING_SNAKE_CASE ( UpperCAmelCase__ : ArgumentParser) ->Tuple: '''simple docstring''' A__ = parser.add_parser( '''convert''' , help='''CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.''' , ) train_parser.add_argument('''--model_type''' , type=UpperCAmelCase__ , required=UpperCAmelCase__ , help='''Model\'s type.''') train_parser.add_argument( '''--tf_checkpoint''' , type=UpperCAmelCase__ , required=UpperCAmelCase__ , help='''TensorFlow checkpoint path or folder.''') train_parser.add_argument( '''--pytorch_dump_output''' , type=UpperCAmelCase__ , required=UpperCAmelCase__ , help='''Path to the PyTorch saved model output.''') train_parser.add_argument('''--config''' , type=UpperCAmelCase__ , default='''''' , help='''Configuration file path or folder.''') train_parser.add_argument( '''--finetuning_task_name''' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , ) train_parser.set_defaults(func=UpperCAmelCase__) def __init__( self : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : str , *UpperCAmelCase__ : Any , ) ->Optional[int]: '''simple docstring''' A__ = logging.get_logger('''transformers-cli/converting''') self._logger.info(f"""Loading model {model_type}""") A__ = model_type A__ = tf_checkpoint A__ = pytorch_dump_output A__ = config A__ = finetuning_task_name def SCREAMING_SNAKE_CASE ( self : str) ->Any: '''simple docstring''' if self._model_type == "albert": try: from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(UpperCAmelCase__) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output) elif self._model_type == "bert": try: from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(UpperCAmelCase__) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output) elif self._model_type == "funnel": try: from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(UpperCAmelCase__) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output) elif self._model_type == "t5": try: from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch except ImportError: raise ImportError(UpperCAmelCase__) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output) elif self._model_type == "gpt": from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import ( convert_openai_checkpoint_to_pytorch, ) convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output) elif self._model_type == "transfo_xl": try: from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import ( convert_transfo_xl_checkpoint_to_pytorch, ) except ImportError: raise ImportError(UpperCAmelCase__) if "ckpt" in self._tf_checkpoint.lower(): A__ = self._tf_checkpoint A__ = '''''' else: A__ = self._tf_checkpoint A__ = '''''' convert_transfo_xl_checkpoint_to_pytorch( UpperCAmelCase__ , self._config , self._pytorch_dump_output , UpperCAmelCase__) elif self._model_type == "gpt2": try: from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import ( convert_gpta_checkpoint_to_pytorch, ) except ImportError: raise ImportError(UpperCAmelCase__) convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output) elif self._model_type == "xlnet": try: from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import ( convert_xlnet_checkpoint_to_pytorch, ) except ImportError: raise ImportError(UpperCAmelCase__) convert_xlnet_checkpoint_to_pytorch( self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name) elif self._model_type == "xlm": from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import ( convert_xlm_checkpoint_to_pytorch, ) convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output) elif self._model_type == "lxmert": from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import ( convert_lxmert_checkpoint_to_pytorch, ) convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output) elif self._model_type == "rembert": from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import ( convert_rembert_tf_checkpoint_to_pytorch, ) convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output) else: raise ValueError( '''--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]''')
87
import unittest from pathlib import Path from shutil import copyfile from transformers import SPIECE_UNDERLINE, is_sentencepiece_available from transformers.models.speech_to_text import SpeechaTextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin _lowerCamelCase : Dict = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_sentencepiece_available(): import sentencepiece as sp _lowerCamelCase : str = 5 _lowerCamelCase : int = 10 @require_sentencepiece @require_tokenizers class UpperCamelCase_ ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = SpeechaTextTokenizer UpperCAmelCase__ = False UpperCAmelCase__ = True def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[str]: '''simple docstring''' super().setUp() A__ = sp.SentencePieceProcessor() spm_model.Load(UpperCAmelCase__) A__ = ['''<s>''', '''<pad>''', '''</s>''', '''<unk>'''] vocab += [spm_model.IdToPiece(id_) for id_ in range(len(UpperCAmelCase__))] A__ = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__)))) A__ = Path(self.tmpdirname) save_json(UpperCAmelCase__ , save_dir / VOCAB_FILES_NAMES['''vocab_file''']) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(UpperCAmelCase__ , save_dir / VOCAB_FILES_NAMES['''spm_file''']) A__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname) tokenizer.save_pretrained(self.tmpdirname) def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[int]: '''simple docstring''' A__ = '''<pad>''' A__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__) , UpperCAmelCase__) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__) , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any: '''simple docstring''' A__ = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , '''<s>''') self.assertEqual(vocab_keys[1] , '''<pad>''') self.assertEqual(vocab_keys[-1] , '''j''') self.assertEqual(len(UpperCAmelCase__) , 1_001) def SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[str]: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1_001) def SCREAMING_SNAKE_CASE ( self : int) ->List[str]: '''simple docstring''' A__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname) A__ = tokenizer.tokenize('''This is a test''') self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase__) , [289, 50, 14, 174, 386] , ) A__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''') self.assertListEqual( UpperCAmelCase__ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , ) A__ = tokenizer.convert_tokens_to_ids(UpperCAmelCase__) self.assertListEqual(UpperCAmelCase__ , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8]) A__ = tokenizer.convert_ids_to_tokens(UpperCAmelCase__) self.assertListEqual( UpperCAmelCase__ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , ) @slow def SCREAMING_SNAKE_CASE ( self : int) ->List[Any]: '''simple docstring''' A__ = {'''input_ids''': [[3_791, 797, 31, 11, 64, 797, 31, 2_429, 433, 12, 1_176, 12, 20, 786, 915, 142, 2_413, 240, 37, 3_238, 797, 31, 11, 35, 93, 915, 142, 2_413, 240, 37, 5_540, 567, 1_276, 93, 37, 610, 40, 62, 455, 657, 1_042, 123, 780, 177, 37, 309, 241, 1_298, 514, 20, 292, 2_737, 114, 2_469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3_388, 511, 459, 4, 3_555, 40, 321, 302, 705, 4, 3_388, 511, 583, 326, 5, 5, 5, 62, 3_310, 560, 177, 2_680, 217, 1_508, 32, 31, 853, 418, 64, 583, 511, 1_605, 62, 35, 93, 560, 177, 2_680, 217, 1_508, 1_521, 64, 583, 511, 519, 62, 20, 1_515, 764, 20, 149, 261, 5_625, 7_972, 20, 5_540, 567, 1_276, 93, 3_925, 1_675, 11, 15, 802, 7_972, 576, 217, 1_508, 11, 35, 93, 1_253, 2_441, 15, 289, 652, 31, 416, 321, 3_842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2_681, 1_153, 3_434, 20, 5_540, 37, 567, 126, 1_253, 2_441, 3_376, 449, 210, 431, 1_563, 177, 767, 5_540, 11, 1_203, 472, 11, 2_953, 685, 285, 364, 706, 1_153, 20, 6_799, 20, 2_869, 20, 4_464, 126, 40, 2_429, 20, 1_040, 866, 2_664, 418, 20, 318, 20, 1_726, 186, 20, 265, 522, 35, 93, 2_191, 4_634, 20, 1_040, 12, 6_799, 15, 228, 2_356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_575, 2_666, 684, 1_582, 1_176, 12, 627, 149, 619, 20, 4_902, 563, 11, 20, 149, 261, 3_420, 2_356, 174, 142, 4_714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase__ , model_name='''facebook/s2t-small-mustc-en-de-st''' , revision='''a14f04cf0776c02f62a8cb800cf7909e15ea23ad''' , ) @require_sentencepiece class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = '''valhalla/s2t_mustc_multilinguial_medium''' UpperCAmelCase__ = '''C\'est trop cool''' UpperCAmelCase__ = '''Esto es genial''' @classmethod def SCREAMING_SNAKE_CASE ( cls : Dict) ->Dict: '''simple docstring''' A__ = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name) return cls def SCREAMING_SNAKE_CASE ( self : str) ->Dict: '''simple docstring''' self.assertEqual(self.tokenizer.lang_code_to_id['''pt'''] , 4) self.assertEqual(self.tokenizer.lang_code_to_id['''ru'''] , 6) self.assertEqual(self.tokenizer.lang_code_to_id['''it'''] , 9) self.assertEqual(self.tokenizer.lang_code_to_id['''de'''] , 11) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Dict: '''simple docstring''' self.assertEqual(self.tokenizer.vocab_size , 10_000) def SCREAMING_SNAKE_CASE ( self : Dict) ->Union[str, Any]: '''simple docstring''' self.assertIn(UpperCAmelCase__ , self.tokenizer.all_special_ids) A__ = [ES_CODE, 4, 1_601, 47, 7_647, 2] A__ = self.tokenizer.decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__) A__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCAmelCase__) self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__) self.assertNotIn(self.tokenizer.eos_token , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : int) ->str: '''simple docstring''' A__ = '''fr''' A__ = self.tokenizer(self.french_text).input_ids self.assertEqual(encoded[0] , UpperCAmelCase__) self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->int: '''simple docstring''' A__ = '''fr''' self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE]) A__ = '''es''' self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE])
87
1
_lowerCamelCase : Union[str, Any] = """ # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git """ _lowerCamelCase : Dict = [{"""type""": """code""", """content""": INSTALL_CONTENT}] _lowerCamelCase : Dict = { """{processor_class}""": """FakeProcessorClass""", """{model_class}""": """FakeModelClass""", """{object_class}""": """FakeObjectClass""", }
87
from __future__ import annotations import requests def SCREAMING_SNAKE_CASE ( lowercase_ ) -> dict: """simple docstring""" A__ = f"""https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty""" return requests.get(lowercase_ ).json() def SCREAMING_SNAKE_CASE ( lowercase_ = 10 ) -> list[dict]: """simple docstring""" A__ = '''https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty''' A__ = requests.get(lowercase_ ).json()[:max_stories] return [get_hackernews_story(lowercase_ ) for story_id in story_ids] def SCREAMING_SNAKE_CASE ( lowercase_ = 10 ) -> str: """simple docstring""" A__ = hackernews_top_stories(lowercase_ ) return "\n".join('''* [{title}]({url})'''.format(**lowercase_ ) for story in stories ) if __name__ == "__main__": print(hackernews_top_stories_as_markdown())
87
1
from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCamelCase : Tuple = logging.get_logger(__name__) _lowerCamelCase : Optional[int] = { """vinvino02/glpn-kitti""": """https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json""", # See all GLPN models at https://huggingface.co/models?filter=glpn } class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''glpn''' def __init__( self : str , UpperCAmelCase__ : Optional[Any]=3 , UpperCAmelCase__ : List[str]=4 , UpperCAmelCase__ : Dict=[2, 2, 2, 2] , UpperCAmelCase__ : Tuple=[8, 4, 2, 1] , UpperCAmelCase__ : Union[str, Any]=[32, 64, 160, 256] , UpperCAmelCase__ : Tuple=[7, 3, 3, 3] , UpperCAmelCase__ : List[Any]=[4, 2, 2, 2] , UpperCAmelCase__ : int=[1, 2, 5, 8] , UpperCAmelCase__ : Dict=[4, 4, 4, 4] , UpperCAmelCase__ : Tuple="gelu" , UpperCAmelCase__ : int=0.0 , UpperCAmelCase__ : Optional[int]=0.0 , UpperCAmelCase__ : Optional[int]=0.02 , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : List[Any]=1e-6 , UpperCAmelCase__ : str=64 , UpperCAmelCase__ : Dict=10 , UpperCAmelCase__ : Any=-1 , **UpperCAmelCase__ : Optional[Any] , ) ->Tuple: '''simple docstring''' super().__init__(**UpperCAmelCase__) A__ = num_channels A__ = num_encoder_blocks A__ = depths A__ = sr_ratios A__ = hidden_sizes A__ = patch_sizes A__ = strides A__ = mlp_ratios A__ = num_attention_heads A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = initializer_range A__ = drop_path_rate A__ = layer_norm_eps A__ = decoder_hidden_size A__ = max_depth A__ = head_in_index
87
import os import sys import warnings from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen from ..table import array_cast from ..utils.file_utils import is_local_path from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: import PIL.Image from .features import FeatureType _lowerCamelCase : Optional[List[str]] = None _lowerCamelCase : int = """<""" if sys.byteorder == """little""" else """>""" # Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image _lowerCamelCase : Union[str, Any] = [ np.dtype("""|b1"""), np.dtype("""|u1"""), np.dtype("""<u2"""), np.dtype(""">u2"""), np.dtype("""<i2"""), np.dtype(""">i2"""), np.dtype("""<u4"""), np.dtype(""">u4"""), np.dtype("""<i4"""), np.dtype(""">i4"""), np.dtype("""<f4"""), np.dtype(""">f4"""), np.dtype("""<f8"""), np.dtype(""">f8"""), ] @dataclass class UpperCamelCase_ : '''simple docstring''' UpperCAmelCase__ = True UpperCAmelCase__ = None # Automatically constructed UpperCAmelCase__ = "PIL.Image.Image" UpperCAmelCase__ = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} ) UpperCAmelCase__ = field(default='''Image''' , init=UpperCAmelCase__ , repr=UpperCAmelCase__ ) def __call__( self : List[str]) ->List[str]: '''simple docstring''' return self.pa_type def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) ->dict: '''simple docstring''' if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support encoding images, please install \'Pillow\'.''') if isinstance(UpperCAmelCase__ , UpperCAmelCase__): A__ = np.array(UpperCAmelCase__) if isinstance(UpperCAmelCase__ , UpperCAmelCase__): return {"path": value, "bytes": None} elif isinstance(UpperCAmelCase__ , UpperCAmelCase__): return {"path": None, "bytes": value} elif isinstance(UpperCAmelCase__ , np.ndarray): # convert the image array to PNG/TIFF bytes return encode_np_array(UpperCAmelCase__) elif isinstance(UpperCAmelCase__ , PIL.Image.Image): # convert the PIL image to bytes (default format is PNG/TIFF) return encode_pil_image(UpperCAmelCase__) elif value.get('''path''') is not None and os.path.isfile(value['''path''']): # we set "bytes": None to not duplicate the data if they're already available locally return {"bytes": None, "path": value.get('''path''')} elif value.get('''bytes''') is not None or value.get('''path''') is not None: # store the image bytes, and path is used to infer the image format using the file extension return {"bytes": value.get('''bytes'''), "path": value.get('''path''')} else: raise ValueError( f"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""") def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : dict , UpperCAmelCase__ : str=None) ->"PIL.Image.Image": '''simple docstring''' if not self.decode: raise RuntimeError('''Decoding is disabled for this feature. Please use Image(decode=True) instead.''') if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support decoding images, please install \'Pillow\'.''') if token_per_repo_id is None: A__ = {} A__ , A__ = value['''path'''], value['''bytes'''] if bytes_ is None: if path is None: raise ValueError(f"""An image should have one of 'path' or 'bytes' but both are None in {value}.""") else: if is_local_path(UpperCAmelCase__): A__ = PIL.Image.open(UpperCAmelCase__) else: A__ = path.split('''::''')[-1] try: A__ = string_to_dict(UpperCAmelCase__ , config.HUB_DATASETS_URL)['''repo_id'''] A__ = token_per_repo_id.get(UpperCAmelCase__) except ValueError: A__ = None with xopen(UpperCAmelCase__ , '''rb''' , use_auth_token=UpperCAmelCase__) as f: A__ = BytesIO(f.read()) A__ = PIL.Image.open(bytes_) else: A__ = PIL.Image.open(BytesIO(bytes_)) image.load() # to avoid "Too many open files" errors return image def SCREAMING_SNAKE_CASE ( self : Dict) ->Union["FeatureType", Dict[str, "FeatureType"]]: '''simple docstring''' from .features import Value return ( self if self.decode else { "bytes": Value('''binary'''), "path": Value('''string'''), } ) def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Union[pa.StringArray, pa.StructArray, pa.ListArray]) ->pa.StructArray: '''simple docstring''' if pa.types.is_string(storage.type): A__ = pa.array([None] * len(UpperCAmelCase__) , type=pa.binary()) A__ = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null()) elif pa.types.is_binary(storage.type): A__ = pa.array([None] * len(UpperCAmelCase__) , type=pa.string()) A__ = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null()) elif pa.types.is_struct(storage.type): if storage.type.get_field_index('''bytes''') >= 0: A__ = storage.field('''bytes''') else: A__ = pa.array([None] * len(UpperCAmelCase__) , type=pa.binary()) if storage.type.get_field_index('''path''') >= 0: A__ = storage.field('''path''') else: A__ = pa.array([None] * len(UpperCAmelCase__) , type=pa.string()) A__ = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null()) elif pa.types.is_list(storage.type): A__ = pa.array( [encode_np_array(np.array(UpperCAmelCase__))['''bytes'''] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , ) A__ = pa.array([None] * len(UpperCAmelCase__) , type=pa.string()) A__ = pa.StructArray.from_arrays( [bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null()) return array_cast(UpperCAmelCase__ , self.pa_type) def SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase__ : pa.StructArray) ->pa.StructArray: '''simple docstring''' @no_op_if_value_is_null def path_to_bytes(UpperCAmelCase__ : Dict): with xopen(UpperCAmelCase__ , '''rb''') as f: A__ = f.read() return bytes_ A__ = pa.array( [ (path_to_bytes(x['''path''']) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) A__ = pa.array( [os.path.basename(UpperCAmelCase__) if path is not None else None for path in storage.field('''path''').to_pylist()] , type=pa.string() , ) A__ = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null()) return array_cast(UpperCAmelCase__ , self.pa_type) def SCREAMING_SNAKE_CASE ( ) -> List[str]: """simple docstring""" if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support encoding images, please install \'Pillow\'.''' ) global _IMAGE_COMPRESSION_FORMATS if _IMAGE_COMPRESSION_FORMATS is None: PIL.Image.init() A__ = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) ) return _IMAGE_COMPRESSION_FORMATS def SCREAMING_SNAKE_CASE ( lowercase_ ) -> bytes: """simple docstring""" A__ = BytesIO() if image.format in list_image_compression_formats(): A__ = image.format else: A__ = '''PNG''' if image.mode in ['''1''', '''L''', '''LA''', '''RGB''', '''RGBA'''] else '''TIFF''' image.save(lowercase_ , format=lowercase_ ) return buffer.getvalue() def SCREAMING_SNAKE_CASE ( lowercase_ ) -> dict: """simple docstring""" if hasattr(lowercase_ , '''filename''' ) and image.filename != "": return {"path": image.filename, "bytes": None} else: return {"path": None, "bytes": image_to_bytes(lowercase_ )} def SCREAMING_SNAKE_CASE ( lowercase_ ) -> dict: """simple docstring""" if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support encoding images, please install \'Pillow\'.''' ) A__ = array.dtype A__ = dtype.byteorder if dtype.byteorder != '''=''' else _NATIVE_BYTEORDER A__ = dtype.kind A__ = dtype.itemsize A__ = None # Multi-channel array case (only np.dtype("|u1") is allowed) if array.shape[2:]: A__ = np.dtype('''|u1''' ) if dtype_kind not in ["u", "i"]: raise TypeError( f"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.""" ) if dtype is not dest_dtype: warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" ) # Exact match elif dtype in _VALID_IMAGE_ARRAY_DTPYES: A__ = dtype else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually) while dtype_itemsize >= 1: A__ = dtype_byteorder + dtype_kind + str(lowercase_ ) A__ = np.dtype(lowercase_ ) if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES: warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" ) break else: dtype_itemsize //= 2 if dest_dtype is None: raise TypeError( f"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}""" ) A__ = PIL.Image.fromarray(array.astype(lowercase_ ) ) return {"path": None, "bytes": image_to_bytes(lowercase_ )} def SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[dict]: """simple docstring""" if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support encoding images, please install \'Pillow\'.''' ) if objs: A__ , A__ = first_non_null_value(lowercase_ ) if isinstance(lowercase_ , lowercase_ ): return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs] if isinstance(lowercase_ , np.ndarray ): A__ = no_op_if_value_is_null(lowercase_ ) return [obj_to_image_dict_func(lowercase_ ) for obj in objs] elif isinstance(lowercase_ , PIL.Image.Image ): A__ = no_op_if_value_is_null(lowercase_ ) return [obj_to_image_dict_func(lowercase_ ) for obj in objs] else: return objs else: return objs
87
1
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCamelCase : Any = logging.get_logger(__name__) _lowerCamelCase : str = { """andreasmadsen/efficient_mlm_m0.40""": ( """https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json""" ), } class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''roberta-prelayernorm''' def __init__( self : List[str] , UpperCAmelCase__ : Dict=50_265 , UpperCAmelCase__ : List[str]=768 , UpperCAmelCase__ : List[str]=12 , UpperCAmelCase__ : int=12 , UpperCAmelCase__ : Union[str, Any]=3_072 , UpperCAmelCase__ : Any="gelu" , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : str=512 , UpperCAmelCase__ : List[str]=2 , UpperCAmelCase__ : str=0.02 , UpperCAmelCase__ : Tuple=1e-12 , UpperCAmelCase__ : Union[str, Any]=1 , UpperCAmelCase__ : Optional[int]=0 , UpperCAmelCase__ : int=2 , UpperCAmelCase__ : Optional[int]="absolute" , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : Any=None , **UpperCAmelCase__ : Dict , ) ->Tuple: '''simple docstring''' super().__init__(pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__) A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = hidden_act A__ = intermediate_size A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = type_vocab_size A__ = initializer_range A__ = layer_norm_eps A__ = position_embedding_type A__ = use_cache A__ = classifier_dropout class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' @property def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task == "multiple-choice": A__ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: A__ = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ])
87
from __future__ import annotations import unittest from transformers import MobileBertConfig, is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_MODEL_FOR_PRETRAINING_MAPPING, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertModel, ) @require_tf class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = ( ( TFMobileBertModel, TFMobileBertForMaskedLM, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertForMultipleChoice, ) if is_tf_available() else () ) UpperCAmelCase__ = ( { '''feature-extraction''': TFMobileBertModel, '''fill-mask''': TFMobileBertForMaskedLM, '''question-answering''': TFMobileBertForQuestionAnswering, '''text-classification''': TFMobileBertForSequenceClassification, '''token-classification''': TFMobileBertForTokenClassification, '''zero-shot''': TFMobileBertForSequenceClassification, } if is_tf_available() else {} ) UpperCAmelCase__ = False UpperCAmelCase__ = False def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str=False) ->Optional[Any]: '''simple docstring''' A__ = super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__) if return_labels: if model_class in get_values(UpperCAmelCase__): A__ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa) return inputs_dict class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : int=13 , UpperCAmelCase__ : str=7 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : str=99 , UpperCAmelCase__ : List[str]=32 , UpperCAmelCase__ : Optional[int]=32 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : List[str]=4 , UpperCAmelCase__ : Optional[Any]=37 , UpperCAmelCase__ : Optional[int]="gelu" , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : List[Any]=512 , UpperCAmelCase__ : Tuple=16 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : Dict=0.02 , UpperCAmelCase__ : int=3 , UpperCAmelCase__ : List[str]=4 , UpperCAmelCase__ : Tuple=None , ) ->Any: '''simple docstring''' A__ = parent A__ = batch_size A__ = seq_length A__ = is_training A__ = use_input_mask A__ = use_token_type_ids A__ = use_labels A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = type_vocab_size A__ = type_sequence_label_size A__ = initializer_range A__ = num_labels A__ = num_choices A__ = scope A__ = embedding_size def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Tuple: '''simple docstring''' A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) A__ = None if self.use_input_mask: A__ = random_attention_mask([self.batch_size, self.seq_length]) A__ = None if self.use_token_type_ids: A__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) A__ = None A__ = None A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size) A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) A__ = ids_tensor([self.batch_size] , self.num_choices) A__ = MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[Any]) ->Any: '''simple docstring''' A__ = TFMobileBertModel(config=UpperCAmelCase__) A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A__ = model(UpperCAmelCase__) A__ = [input_ids, input_mask] A__ = model(UpperCAmelCase__) A__ = model(UpperCAmelCase__) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size)) def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple) ->Optional[Any]: '''simple docstring''' A__ = TFMobileBertForMaskedLM(config=UpperCAmelCase__) A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A__ = model(UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any]) ->int: '''simple docstring''' A__ = TFMobileBertForNextSentencePrediction(config=UpperCAmelCase__) A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A__ = model(UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2)) def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int) ->List[Any]: '''simple docstring''' A__ = TFMobileBertForPreTraining(config=UpperCAmelCase__) A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A__ = model(UpperCAmelCase__) self.parent.assertEqual( result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2)) def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple) ->Dict: '''simple docstring''' A__ = self.num_labels A__ = TFMobileBertForSequenceClassification(config=UpperCAmelCase__) A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A__ = model(UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : int) ->Dict: '''simple docstring''' A__ = self.num_choices A__ = TFMobileBertForMultipleChoice(config=UpperCAmelCase__) A__ = tf.tile(tf.expand_dims(UpperCAmelCase__ , 1) , (1, self.num_choices, 1)) A__ = tf.tile(tf.expand_dims(UpperCAmelCase__ , 1) , (1, self.num_choices, 1)) A__ = tf.tile(tf.expand_dims(UpperCAmelCase__ , 1) , (1, self.num_choices, 1)) A__ = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, '''token_type_ids''': multiple_choice_token_type_ids, } A__ = model(UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int]) ->int: '''simple docstring''' A__ = self.num_labels A__ = TFMobileBertForTokenClassification(config=UpperCAmelCase__) A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A__ = model(UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any]) ->Union[str, Any]: '''simple docstring''' A__ = TFMobileBertForQuestionAnswering(config=UpperCAmelCase__) A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A__ = model(UpperCAmelCase__) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def SCREAMING_SNAKE_CASE ( self : Any) ->str: '''simple docstring''' A__ = self.prepare_config_and_inputs() ( ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ) = config_and_inputs A__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict def SCREAMING_SNAKE_CASE ( self : Any) ->Union[str, Any]: '''simple docstring''' A__ = TFMobileBertModelTest.TFMobileBertModelTester(self) A__ = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37) def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[int]: '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[str]) ->Tuple: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Tuple) ->Dict: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[int]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Tuple) ->List[str]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*UpperCAmelCase__) @slow def SCREAMING_SNAKE_CASE ( self : str) ->List[Any]: '''simple docstring''' for model_name in ["google/mobilebert-uncased"]: A__ = TFMobileBertModel.from_pretrained(UpperCAmelCase__) self.assertIsNotNone(UpperCAmelCase__) @require_tf class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Any: '''simple docstring''' A__ = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''') A__ = tf.constant([[0, 1, 2, 3, 4, 5]]) A__ = model(UpperCAmelCase__)[0] A__ = [1, 6, 30_522] self.assertEqual(output.shape , UpperCAmelCase__) A__ = tf.constant( [ [ [-4.5919547, -9.248295, -9.645256], [-6.7306175, -6.440284, -6.6052837], [-7.2743506, -6.7847915, -6.024673], ] ]) tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase__ , atol=1e-4)
87
1
_lowerCamelCase : str = { """Pillow""": """Pillow<10.0.0""", """accelerate""": """accelerate>=0.20.3""", """av""": """av==9.2.0""", """beautifulsoup4""": """beautifulsoup4""", """black""": """black~=23.1""", """codecarbon""": """codecarbon==1.2.0""", """cookiecutter""": """cookiecutter==1.7.3""", """dataclasses""": """dataclasses""", """datasets""": """datasets!=2.5.0""", """decord""": """decord==0.6.0""", """deepspeed""": """deepspeed>=0.9.3""", """diffusers""": """diffusers""", """dill""": """dill<0.3.5""", """evaluate""": """evaluate>=0.2.0""", """fairscale""": """fairscale>0.3""", """faiss-cpu""": """faiss-cpu""", """fastapi""": """fastapi""", """filelock""": """filelock""", """flax""": """flax>=0.4.1,<=0.7.0""", """ftfy""": """ftfy""", """fugashi""": """fugashi>=1.0""", """GitPython""": """GitPython<3.1.19""", """hf-doc-builder""": """hf-doc-builder>=0.3.0""", """huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""", """importlib_metadata""": """importlib_metadata""", """ipadic""": """ipadic>=1.0.0,<2.0""", """isort""": """isort>=5.5.4""", """jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""", """jaxlib""": """jaxlib>=0.1.65,<=0.4.13""", """jieba""": """jieba""", """kenlm""": """kenlm""", """keras-nlp""": """keras-nlp>=0.3.1""", """librosa""": """librosa""", """nltk""": """nltk""", """natten""": """natten>=0.14.6""", """numpy""": """numpy>=1.17""", """onnxconverter-common""": """onnxconverter-common""", """onnxruntime-tools""": """onnxruntime-tools>=1.4.2""", """onnxruntime""": """onnxruntime>=1.4.0""", """opencv-python""": """opencv-python""", """optuna""": """optuna""", """optax""": """optax>=0.0.8,<=0.1.4""", """packaging""": """packaging>=20.0""", """parameterized""": """parameterized""", """phonemizer""": """phonemizer""", """protobuf""": """protobuf""", """psutil""": """psutil""", """pyyaml""": """pyyaml>=5.1""", """pydantic""": """pydantic<2""", """pytest""": """pytest>=7.2.0""", """pytest-timeout""": """pytest-timeout""", """pytest-xdist""": """pytest-xdist""", """python""": """python>=3.8.0""", """ray[tune]""": """ray[tune]""", """regex""": """regex!=2019.12.17""", """requests""": """requests""", """rhoknp""": """rhoknp>=1.1.0,<1.3.1""", """rjieba""": """rjieba""", """rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""", """ruff""": """ruff>=0.0.241,<=0.0.259""", """sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""", """sacremoses""": """sacremoses""", """safetensors""": """safetensors>=0.3.1""", """sagemaker""": """sagemaker>=2.31.0""", """scikit-learn""": """scikit-learn""", """sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""", """sigopt""": """sigopt""", """starlette""": """starlette""", """sudachipy""": """sudachipy>=0.6.6""", """sudachidict_core""": """sudachidict_core>=20220729""", """tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""", """tensorflow""": """tensorflow>=2.6,<2.14""", """tensorflow-text""": """tensorflow-text<2.14""", """tf2onnx""": """tf2onnx""", """timeout-decorator""": """timeout-decorator""", """timm""": """timm""", """tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""", """torch""": """torch>=1.9,!=1.12.0""", """torchaudio""": """torchaudio""", """torchvision""": """torchvision""", """pyctcdecode""": """pyctcdecode>=0.4.0""", """tqdm""": """tqdm>=4.27""", """unidic""": """unidic>=1.0.2""", """unidic_lite""": """unidic_lite>=1.0.7""", """urllib3""": """urllib3<2.0.0""", """uvicorn""": """uvicorn""", }
87
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' def __init__( self : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int=13 , UpperCAmelCase__ : Union[str, Any]=3 , UpperCAmelCase__ : str=224 , UpperCAmelCase__ : str=30 , UpperCAmelCase__ : Tuple=400 , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Union[str, Any]=[0.5, 0.5, 0.5] , UpperCAmelCase__ : Tuple=[0.5, 0.5, 0.5] , ) ->str: '''simple docstring''' A__ = size if size is not None else {'''height''': 18, '''width''': 18} A__ = parent A__ = batch_size A__ = num_channels A__ = image_size A__ = min_resolution A__ = max_resolution A__ = do_resize A__ = size A__ = do_normalize A__ = image_mean A__ = image_std def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[int]: '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class UpperCamelCase_ ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = ViTImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE ( self : List[str]) ->str: '''simple docstring''' A__ = EfficientFormerImageProcessorTester(self) @property def SCREAMING_SNAKE_CASE ( self : Dict) ->int: '''simple docstring''' return self.image_proc_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(UpperCAmelCase__ , '''image_mean''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''image_std''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''do_normalize''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''do_resize''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''size''')) def SCREAMING_SNAKE_CASE ( self : List[str]) ->Dict: '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self : str) ->Optional[Any]: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) # create random PIL images A__ = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCAmelCase__) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , Image.Image) # Test not batched input A__ = image_processor(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , ) # Test batched A__ = image_processor(UpperCAmelCase__ , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , ) def SCREAMING_SNAKE_CASE ( self : Tuple) ->List[Any]: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors A__ = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , np.ndarray) # Test not batched input A__ = image_processor(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , ) # Test batched A__ = image_processor(UpperCAmelCase__ , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , ) def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[Any]: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors A__ = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , torch.Tensor) # Test not batched input A__ = image_processor(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , ) # Test batched A__ = image_processor(UpperCAmelCase__ , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , )
87
1
import os def SCREAMING_SNAKE_CASE ( lowercase_ = "matrix.txt" ) -> int: """simple docstring""" with open(os.path.join(os.path.dirname(lowercase_ ) , lowercase_ ) ) as in_file: A__ = in_file.read() A__ = [[int(lowercase_ ) for cell in row.split(''',''' )] for row in data.strip().splitlines()] A__ = [[0 for cell in row] for row in grid] A__ = len(grid[0] ) A__ = [[0 for i in range(lowercase_ )] for j in range(lowercase_ )] A__ = grid[0][0] for i in range(1 , lowercase_ ): A__ = grid[0][i] + dp[0][i - 1] for i in range(1 , lowercase_ ): A__ = grid[i][0] + dp[i - 1][0] for i in range(1 , lowercase_ ): for j in range(1 , lowercase_ ): A__ = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] ) return dp[-1][-1] if __name__ == "__main__": print(F'''{solution() = }''')
87
from math import atan, cos, radians, sin, tan from .haversine_distance import haversine_distance _lowerCamelCase : Dict = 6_378_137.0 _lowerCamelCase : Union[str, Any] = 6_356_752.314_245 _lowerCamelCase : List[Any] = 6378137 def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> float: """simple docstring""" A__ = (AXIS_A - AXIS_B) / AXIS_A # Parametric latitudes # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude A__ = atan((1 - flattening) * tan(radians(lowercase_ ) ) ) A__ = atan((1 - flattening) * tan(radians(lowercase_ ) ) ) # Compute central angle between two points # using haversine theta. sigma = haversine_distance / equatorial radius A__ = haversine_distance(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) / EQUATORIAL_RADIUS # Intermediate P and Q values A__ = (b_lata + b_lata) / 2 A__ = (b_lata - b_lata) / 2 # Intermediate X value # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2) A__ = (sin(lowercase_ ) ** 2) * (cos(lowercase_ ) ** 2) A__ = cos(sigma / 2 ) ** 2 A__ = (sigma - sin(lowercase_ )) * (x_numerator / x_demonimator) # Intermediate Y value # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2) A__ = (cos(lowercase_ ) ** 2) * (sin(lowercase_ ) ** 2) A__ = sin(sigma / 2 ) ** 2 A__ = (sigma + sin(lowercase_ )) * (y_numerator / y_denominator) return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value))) if __name__ == "__main__": import doctest doctest.testmod()
87
1
import colorsys from PIL import Image # type: ignore def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> float: """simple docstring""" A__ = x A__ = y for step in range(lowercase_ ): # noqa: B007 A__ = a * a - b * b + x A__ = 2 * a * b + y A__ = a_new # divergence happens for all complex number with an absolute value # greater than 4 if a * a + b * b > 4: break return step / (max_step - 1) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> tuple: """simple docstring""" if distance == 1: return (0, 0, 0) else: return (255, 255, 255) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> tuple: """simple docstring""" if distance == 1: return (0, 0, 0) else: return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(lowercase_ , 1 , 1 ) ) def SCREAMING_SNAKE_CASE ( lowercase_ = 800 , lowercase_ = 600 , lowercase_ = -0.6 , lowercase_ = 0 , lowercase_ = 3.2 , lowercase_ = 50 , lowercase_ = True , ) -> Image.Image: """simple docstring""" A__ = Image.new('''RGB''' , (image_width, image_height) ) A__ = img.load() # loop through the image-coordinates for image_x in range(lowercase_ ): for image_y in range(lowercase_ ): # determine the figure-coordinates based on the image-coordinates A__ = figure_width / image_width * image_height A__ = figure_center_x + (image_x / image_width - 0.5) * figure_width A__ = figure_center_y + (image_y / image_height - 0.5) * figure_height A__ = get_distance(lowercase_ , lowercase_ , lowercase_ ) # color the corresponding pixel based on the selected coloring-function if use_distance_color_coding: A__ = get_color_coded_rgb(lowercase_ ) else: A__ = get_black_and_white_rgb(lowercase_ ) return img if __name__ == "__main__": import doctest doctest.testmod() # colored version, full figure _lowerCamelCase : Optional[int] = get_image() # uncomment for colored version, different section, zoomed in # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4, # figure_width = 0.8) # uncomment for black and white version, full figure # img = get_image(use_distance_color_coding = False) # uncomment to save the image # img.save("mandelbrot.png") img.show()
87
import heapq import sys import numpy as np _lowerCamelCase : Any = tuple[int, int] class UpperCamelCase_ : '''simple docstring''' def __init__( self : Any) ->str: '''simple docstring''' A__ = [] A__ = set() def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[str]: '''simple docstring''' if not self.empty(): return self.elements[0][0] else: return float('''inf''') def SCREAMING_SNAKE_CASE ( self : Tuple) ->str: '''simple docstring''' return len(self.elements) == 0 def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any]) ->List[str]: '''simple docstring''' if item not in self.set: heapq.heappush(self.elements , (priority, item)) self.set.add(UpperCAmelCase__) else: # update # print("update", item) A__ = [] ((A__) , (A__)) = heapq.heappop(self.elements) while x != item: temp.append((pri, x)) ((A__) , (A__)) = heapq.heappop(self.elements) temp.append((priority, item)) for pro, xxx in temp: heapq.heappush(self.elements , (pro, xxx)) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : List[Any]) ->Union[str, Any]: '''simple docstring''' if item in self.set: self.set.remove(UpperCAmelCase__) A__ = [] ((A__) , (A__)) = heapq.heappop(self.elements) while x != item: temp.append((pro, x)) ((A__) , (A__)) = heapq.heappop(self.elements) for prito, yyy in temp: heapq.heappush(self.elements , (prito, yyy)) def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: '''simple docstring''' return self.elements[0][1] def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->int: '''simple docstring''' ((A__) , (A__)) = heapq.heappop(self.elements) self.set.remove(UpperCAmelCase__) return (priority, item) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str: """simple docstring""" A__ = np.array(lowercase_ ) A__ = np.array(lowercase_ ) return np.linalg.norm(a - b ) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str: """simple docstring""" return consistent_heuristic(lowercase_ , lowercase_ ) // t def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Union[str, Any]: """simple docstring""" return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] ) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Optional[int]: """simple docstring""" A__ = g_function[start] + Wa * heuristics[i](lowercase_ , lowercase_ ) return ans def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> str: """simple docstring""" A__ = np.chararray((n, n) ) for i in range(lowercase_ ): for j in range(lowercase_ ): A__ = '''*''' for i in range(lowercase_ ): for j in range(lowercase_ ): if (j, (n - 1) - i) in blocks: A__ = '''#''' A__ = '''-''' A__ = back_pointer[goal] while x != start: ((A__) , (A__)) = x # print(x) A__ = '''-''' A__ = back_pointer[x] A__ = '''-''' for i in range(lowercase_ ): for j in range(lowercase_ ): if (i, j) == (0, n - 1): print(grid[i][j] , end=''' ''' ) print('''<-- End position''' , end=''' ''' ) else: print(grid[i][j] , end=''' ''' ) print() print('''^''' ) print('''Start position''' ) print() print('''# is an obstacle''' ) print('''- is the path taken by algorithm''' ) print('''PATH TAKEN BY THE ALGORITHM IS:-''' ) A__ = back_pointer[goal] while x != start: print(lowercase_ , end=''' ''' ) A__ = back_pointer[x] print(lowercase_ ) sys.exit() def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Dict: """simple docstring""" if p[0] < 0 or p[0] > n - 1: return False if p[1] < 0 or p[1] > n - 1: return False return True def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Union[str, Any]: """simple docstring""" for itera in range(lowercase_ ): open_list[itera].remove_element(lowercase_ ) # print("s", s) # print("j", j) ((A__) , (A__)) = s A__ = (x - 1, y) A__ = (x + 1, y) A__ = (x, y + 1) A__ = (x, y - 1) for neighbours in [left, right, up, down]: if neighbours not in blocks: if valid(lowercase_ ) and neighbours not in visited: # print("neighbour", neighbours) visited.add(lowercase_ ) A__ = -1 A__ = float('''inf''' ) if valid(lowercase_ ) and g_function[neighbours] > g_function[s] + 1: A__ = g_function[s] + 1 A__ = s if neighbours not in close_list_anchor: open_list[0].put(lowercase_ , key(lowercase_ , 0 , lowercase_ , lowercase_ ) ) if neighbours not in close_list_inad: for var in range(1 , lowercase_ ): if key(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) <= Wa * key( lowercase_ , 0 , lowercase_ , lowercase_ ): open_list[j].put( lowercase_ , key(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) ) def SCREAMING_SNAKE_CASE ( ) -> Optional[int]: """simple docstring""" A__ = [] for x in range(1 , 5 ): for y in range(1 , 6 ): some_list.append((x, y) ) for x in range(15 , 20 ): some_list.append((x, 17) ) for x in range(10 , 19 ): for y in range(1 , 15 ): some_list.append((x, y) ) # L block for x in range(1 , 4 ): for y in range(12 , 19 ): some_list.append((x, y) ) for x in range(3 , 13 ): for y in range(16 , 19 ): some_list.append((x, y) ) return some_list _lowerCamelCase : Dict = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a} _lowerCamelCase : Optional[Any] = [ (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1), ] _lowerCamelCase : Optional[int] = make_common_ground() _lowerCamelCase : Optional[Any] = blocks_blk # hyper parameters _lowerCamelCase : Optional[int] = 1 _lowerCamelCase : Optional[int] = 1 _lowerCamelCase : List[Any] = 20 _lowerCamelCase : Any = 3 # one consistent and two other inconsistent # start and end destination _lowerCamelCase : str = (0, 0) _lowerCamelCase : Tuple = (n - 1, n - 1) _lowerCamelCase : int = 1 def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> str: """simple docstring""" A__ = {start: 0, goal: float('''inf''' )} A__ = {start: -1, goal: -1} A__ = [] A__ = set() for i in range(lowercase_ ): open_list.append(PriorityQueue() ) open_list[i].put(lowercase_ , key(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) ) A__ = [] A__ = [] while open_list[0].minkey() < float('''inf''' ): for i in range(1 , lowercase_ ): # print(open_list[0].minkey(), open_list[i].minkey()) if open_list[i].minkey() <= Wa * open_list[0].minkey(): global t t += 1 if g_function[goal] <= open_list[i].minkey(): if g_function[goal] < float('''inf''' ): do_something(lowercase_ , lowercase_ , lowercase_ ) else: A__ , A__ = open_list[i].top_show() visited.add(lowercase_ ) expand_state( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) close_list_inad.append(lowercase_ ) else: if g_function[goal] <= open_list[0].minkey(): if g_function[goal] < float('''inf''' ): do_something(lowercase_ , lowercase_ , lowercase_ ) else: A__ = open_list[0].top_show() visited.add(lowercase_ ) expand_state( lowercase_ , 0 , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) close_list_anchor.append(lowercase_ ) print('''No path found to goal''' ) print() for i in range(n - 1 , -1 , -1 ): for j in range(lowercase_ ): if (j, i) in blocks: print('''#''' , end=''' ''' ) elif (j, i) in back_pointer: if (j, i) == (n - 1, n - 1): print('''*''' , end=''' ''' ) else: print('''-''' , end=''' ''' ) else: print('''*''' , end=''' ''' ) if (j, i) == (n - 1, n - 1): print('''<-- End position''' , end=''' ''' ) print() print('''^''' ) print('''Start position''' ) print() print('''# is an obstacle''' ) print('''- is the path taken by algorithm''' ) if __name__ == "__main__": multi_a_star(start, goal, n_heuristic)
87
1
import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = (DDPMScheduler,) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , **UpperCAmelCase__ : Union[str, Any]) ->Any: '''simple docstring''' A__ = { '''num_train_timesteps''': 1_000, '''beta_start''': 0.0001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''variance_type''': '''fixed_small''', '''clip_sample''': True, } config.update(**UpperCAmelCase__) return config def SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]: '''simple docstring''' for timesteps in [1, 5, 100, 1_000]: self.check_over_configs(num_train_timesteps=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->str: '''simple docstring''' for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2]): self.check_over_configs(beta_start=UpperCAmelCase__ , beta_end=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Tuple) ->Dict: '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : str) ->Dict: '''simple docstring''' for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Dict) ->List[Any]: '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Tuple: '''simple docstring''' self.check_over_configs(thresholding=UpperCAmelCase__) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=UpperCAmelCase__ , prediction_type=UpperCAmelCase__ , sample_max_value=UpperCAmelCase__ , ) def SCREAMING_SNAKE_CASE ( self : List[str]) ->str: '''simple docstring''' for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]: '''simple docstring''' for t in [0, 500, 999]: self.check_over_forward(time_step=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[str]) ->Union[str, Any]: '''simple docstring''' A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config() A__ = scheduler_class(**UpperCAmelCase__) assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5 def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[Any]: '''simple docstring''' A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config() A__ = scheduler_class(**UpperCAmelCase__) A__ = len(UpperCAmelCase__) A__ = self.dummy_model() A__ = self.dummy_sample_deter A__ = torch.manual_seed(0) for t in reversed(range(UpperCAmelCase__)): # 1. predict noise residual A__ = model(UpperCAmelCase__ , UpperCAmelCase__) # 2. predict previous mean of sample x_t-1 A__ = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , generator=UpperCAmelCase__).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance A__ = pred_prev_sample A__ = torch.sum(torch.abs(UpperCAmelCase__)) A__ = torch.mean(torch.abs(UpperCAmelCase__)) assert abs(result_sum.item() - 258.9606) < 1e-2 assert abs(result_mean.item() - 0.3372) < 1e-3 def SCREAMING_SNAKE_CASE ( self : int) ->int: '''simple docstring''' A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config(prediction_type='''v_prediction''') A__ = scheduler_class(**UpperCAmelCase__) A__ = len(UpperCAmelCase__) A__ = self.dummy_model() A__ = self.dummy_sample_deter A__ = torch.manual_seed(0) for t in reversed(range(UpperCAmelCase__)): # 1. predict noise residual A__ = model(UpperCAmelCase__ , UpperCAmelCase__) # 2. predict previous mean of sample x_t-1 A__ = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , generator=UpperCAmelCase__).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance A__ = pred_prev_sample A__ = torch.sum(torch.abs(UpperCAmelCase__)) A__ = torch.mean(torch.abs(UpperCAmelCase__)) assert abs(result_sum.item() - 202.0296) < 1e-2 assert abs(result_mean.item() - 0.2631) < 1e-3 def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Tuple: '''simple docstring''' A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config() A__ = scheduler_class(**UpperCAmelCase__) A__ = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=UpperCAmelCase__) A__ = scheduler.timesteps for i, timestep in enumerate(UpperCAmelCase__): if i == len(UpperCAmelCase__) - 1: A__ = -1 else: A__ = timesteps[i + 1] A__ = scheduler.previous_timestep(UpperCAmelCase__) A__ = prev_t.item() self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Tuple) ->List[str]: '''simple docstring''' A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config() A__ = scheduler_class(**UpperCAmelCase__) A__ = [100, 87, 50, 51, 0] with self.assertRaises(UpperCAmelCase__ , msg='''`custom_timesteps` must be in descending order.'''): scheduler.set_timesteps(timesteps=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[int]: '''simple docstring''' A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config() A__ = scheduler_class(**UpperCAmelCase__) A__ = [100, 87, 50, 1, 0] A__ = len(UpperCAmelCase__) with self.assertRaises(UpperCAmelCase__ , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.'''): scheduler.set_timesteps(num_inference_steps=UpperCAmelCase__ , timesteps=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Dict) ->List[str]: '''simple docstring''' A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config() A__ = scheduler_class(**UpperCAmelCase__) A__ = [scheduler.config.num_train_timesteps] with self.assertRaises( UpperCAmelCase__ , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ): scheduler.set_timesteps(timesteps=UpperCAmelCase__)
87
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.utils import ComputeEnvironment from .cluster import get_cluster_input from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401 from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401 from .sagemaker import get_sagemaker_input _lowerCamelCase : Optional[Any] = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine""" def SCREAMING_SNAKE_CASE ( ) -> Dict: """simple docstring""" A__ = _ask_options( '''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , ) if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: A__ = get_sagemaker_input() else: A__ = get_cluster_input() return config def SCREAMING_SNAKE_CASE ( lowercase_=None ) -> List[Any]: """simple docstring""" if subparsers is not None: A__ = subparsers.add_parser('''config''' , description=lowercase_ ) else: A__ = argparse.ArgumentParser('''Accelerate config command''' , description=lowercase_ ) parser.add_argument( '''--config_file''' , default=lowercase_ , help=( '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache ''' '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ''' '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ''' '''with \'huggingface\'.''' ) , ) if subparsers is not None: parser.set_defaults(func=lowercase_ ) return parser def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Any: """simple docstring""" A__ = get_user_input() if args.config_file is not None: A__ = args.config_file else: if not os.path.isdir(lowercase_ ): os.makedirs(lowercase_ ) A__ = default_yaml_config_file if config_file.endswith('''.json''' ): config.to_json_file(lowercase_ ) else: config.to_yaml_file(lowercase_ ) print(f"""accelerate configuration saved at {config_file}""" ) def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]: """simple docstring""" A__ = config_command_parser() A__ = parser.parse_args() config_command(lowercase_ ) if __name__ == "__main__": main()
87
1
from itertools import count def SCREAMING_SNAKE_CASE ( lowercase_ = 50 ) -> int: """simple docstring""" A__ = [1] * min_block_length for n in count(lowercase_ ): fill_count_functions.append(1 ) for block_length in range(lowercase_ , n + 1 ): for block_start in range(n - block_length ): fill_count_functions[n] += fill_count_functions[ n - block_start - block_length - 1 ] fill_count_functions[n] += 1 if fill_count_functions[n] > 1_000_000: break return n if __name__ == "__main__": print(F'''{solution() = }''')
87
import argparse import numpy as np import torch from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging logging.set_verbosity_info() _lowerCamelCase : int = logging.get_logger("""transformers.models.speecht5""") def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Tuple: """simple docstring""" hf_model.apply_weight_norm() A__ = checkpoint['''input_conv.weight_g'''] A__ = checkpoint['''input_conv.weight_v'''] A__ = checkpoint['''input_conv.bias'''] for i in range(len(config.upsample_rates ) ): A__ = checkpoint[f"""upsamples.{i}.1.weight_g"""] A__ = checkpoint[f"""upsamples.{i}.1.weight_v"""] A__ = checkpoint[f"""upsamples.{i}.1.bias"""] for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ): for j in range(len(config.resblock_dilation_sizes ) ): A__ = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_g"""] A__ = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_v"""] A__ = checkpoint[f"""blocks.{i}.convs1.{j}.1.bias"""] A__ = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_g"""] A__ = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_v"""] A__ = checkpoint[f"""blocks.{i}.convs2.{j}.1.bias"""] A__ = checkpoint['''output_conv.1.weight_g'''] A__ = checkpoint['''output_conv.1.weight_v'''] A__ = checkpoint['''output_conv.1.bias'''] hf_model.remove_weight_norm() @torch.no_grad() def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_=None , lowercase_=None , ) -> str: """simple docstring""" if config_path is not None: A__ = SpeechTaHifiGanConfig.from_pretrained(lowercase_ ) else: A__ = SpeechTaHifiGanConfig() A__ = SpeechTaHifiGan(lowercase_ ) A__ = torch.load(lowercase_ ) load_weights(orig_checkpoint['''model''']['''generator'''] , lowercase_ , lowercase_ ) A__ = np.load(lowercase_ ) A__ = stats[0].reshape(-1 ) A__ = stats[1].reshape(-1 ) A__ = torch.from_numpy(lowercase_ ).float() A__ = torch.from_numpy(lowercase_ ).float() model.save_pretrained(lowercase_ ) if repo_id: print('''Pushing to the hub...''' ) model.push_to_hub(lowercase_ ) if __name__ == "__main__": _lowerCamelCase : Any = argparse.ArgumentParser() parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""") parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub.""" ) _lowerCamelCase : List[str] = parser.parse_args() convert_hifigan_checkpoint( args.checkpoint_path, args.stats_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
87
1
import json import multiprocessing as mp import re from collections import defaultdict from functools import partial from typing import Dict, List, Optional, Set, Tuple, Type from datasets import Dataset from datasketch import MinHash, MinHashLSH from dpu_utils.utils.iterators import ThreadedIterator from tqdm import tqdm _lowerCamelCase : Any = re.compile("""[^A-Za-z_0-9]""") # parameters used in DuplicationIndex _lowerCamelCase : int = 10 _lowerCamelCase : Dict = 256 def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Optional[MinHash]: """simple docstring""" if len(lowercase_ ) < MIN_NUM_TOKENS: return None A__ = MinHash(num_perm=lowercase_ ) for token in set(lowercase_ ): min_hash.update(token.encode() ) return min_hash def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Set[str]: """simple docstring""" return {t for t in NON_ALPHA.split(lowercase_ ) if len(t.strip() ) > 0} class UpperCamelCase_ : '''simple docstring''' def __init__( self : Tuple , *, UpperCAmelCase__ : float = 0.85 , ) ->int: '''simple docstring''' A__ = duplication_jaccard_threshold A__ = NUM_PERM A__ = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm) A__ = defaultdict(UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : MinHash) ->None: '''simple docstring''' A__ = self._index.query(UpperCAmelCase__) if code_key in self._index.keys: print(f"""Duplicate key {code_key}""") return self._index.insert(UpperCAmelCase__ , UpperCAmelCase__) if len(UpperCAmelCase__) > 0: for base_duplicate in close_duplicates: if base_duplicate in self._duplicate_clusters: self._duplicate_clusters[base_duplicate].add(UpperCAmelCase__) break else: self._duplicate_clusters[close_duplicates[0]].add(UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[List[Dict]]: '''simple docstring''' A__ = [] for base, duplicates in self._duplicate_clusters.items(): A__ = [base] + list(UpperCAmelCase__) # reformat the cluster to be a list of dict A__ = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster] duplicate_clusters.append(UpperCAmelCase__) return duplicate_clusters def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : Tuple) ->None: '''simple docstring''' A__ = self.get_duplicate_clusters() with open(UpperCAmelCase__ , '''w''') as f: json.dump(UpperCAmelCase__ , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Optional[Any]: """simple docstring""" A__ , A__ = element A__ = get_min_hash([t for t in NON_ALPHA.split(data['''content'''] ) if len(t.strip() ) > 0] ) if min_hash is not None: return (index, data["repo_name"], data["path"]), min_hash def SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[Any]: """simple docstring""" with mp.Pool() as pool: for data in pool.imap_unordered( _compute_min_hash , ThreadedIterator(lowercase_ , max_queue_size=10_000 ) , chunksize=100 , ): if data is not None: yield data def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Dict: """simple docstring""" A__ = DuplicationIndex(duplication_jaccard_threshold=lowercase_ ) for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(lowercase_ ) ) , max_queue_size=100 ) ): di.add(lowercase_ , lowercase_ ) # Returns a List[Cluster] where Cluster is List[str] with the filenames. return di.get_duplicate_clusters() def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> float: """simple docstring""" A__ = get_tokens(lowercase_ ) A__ = get_tokens(lowercase_ ) return len(tokensa & tokensa ) / len(tokensa | tokensa ) _lowerCamelCase : Any = None def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Optional[int]: """simple docstring""" A__ = [] for elementa in cluster: A__ = _shared_dataset[elementa['''base_index''']]['''content'''] for elementa in extremes: A__ = _shared_dataset[elementa['''base_index''']]['''content'''] if jaccard_similarity(lowercase_ , lowercase_ ) >= jaccard_threshold: elementa["copies"] += 1 break else: A__ = 1 extremes.append(lowercase_ ) return extremes def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> List[str]: """simple docstring""" global _shared_dataset A__ = dataset A__ = [] A__ = partial(_find_cluster_extremes_shared , jaccard_threshold=lowercase_ ) with mp.Pool() as pool: for extremes in tqdm( pool.imap_unordered( lowercase_ , lowercase_ , ) , total=len(lowercase_ ) , ): extremes_list.append(lowercase_ ) return extremes_list def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]: """simple docstring""" A__ = make_duplicate_clusters(lowercase_ , lowercase_ ) A__ = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster} A__ = {} A__ = find_extremes(lowercase_ , lowercase_ , lowercase_ ) for extremes in extremes_clusters: for element in extremes: A__ = element A__ = duplicate_indices - set(extreme_dict.keys() ) A__ = dataset.filter(lambda lowercase_ , lowercase_ : idx not in remove_indices , with_indices=lowercase_ ) # update duplicate_clusters for cluster in duplicate_clusters: for element in cluster: A__ = element['''base_index'''] in extreme_dict if element["is_extreme"]: A__ = extreme_dict[element['''base_index''']]['''copies'''] print(f"""Original dataset size: {len(lowercase_ )}""" ) print(f"""Number of duplicate clusters: {len(lowercase_ )}""" ) print(f"""Files in duplicate cluster: {len(lowercase_ )}""" ) print(f"""Unique files in duplicate cluster: {len(lowercase_ )}""" ) print(f"""Filtered dataset size: {len(lowercase_ )}""" ) return ds_filter, duplicate_clusters
87
import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class UpperCamelCase_ : '''simple docstring''' def __init__( self : Tuple , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict=13 , UpperCAmelCase__ : Dict=7 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : str=99 , UpperCAmelCase__ : Union[str, Any]=32 , UpperCAmelCase__ : Tuple=5 , UpperCAmelCase__ : Union[str, Any]=4 , UpperCAmelCase__ : List[Any]=37 , UpperCAmelCase__ : Union[str, Any]="gelu" , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : Tuple=50 , UpperCAmelCase__ : Optional[int]=0.02 , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : List[str]=None , ) ->Union[str, Any]: '''simple docstring''' A__ = parent A__ = batch_size A__ = seq_length A__ = is_training A__ = use_input_mask A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = initializer_range A__ = use_labels A__ = scope def SCREAMING_SNAKE_CASE ( self : int) ->Any: '''simple docstring''' A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) A__ = None if self.use_input_mask: A__ = random_attention_mask([self.batch_size, self.seq_length]) if self.use_labels: A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) A__ = self.get_config() return config, input_ids, input_mask, token_labels def SCREAMING_SNAKE_CASE ( self : int) ->int: '''simple docstring''' return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE ( self : List[str]) ->Union[str, Any]: '''simple docstring''' ( ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ) = self.prepare_config_and_inputs() A__ = True A__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) A__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : List[Any] , ) ->Dict: '''simple docstring''' A__ = BertGenerationEncoder(config=UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() A__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__) A__ = model(UpperCAmelCase__) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , **UpperCAmelCase__ : Optional[Any] , ) ->Dict: '''simple docstring''' A__ = True A__ = BertGenerationEncoder(config=UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() A__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , ) A__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict , **UpperCAmelCase__ : Optional[int] , ) ->Any: '''simple docstring''' A__ = True A__ = True A__ = BertGenerationDecoder(config=UpperCAmelCase__).to(UpperCAmelCase__).eval() # first forward pass A__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ , ) A__ = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A__ = ids_tensor((self.batch_size, 3) , config.vocab_size) A__ = ids_tensor((self.batch_size, 3) , vocab_size=2) # append to next input_ids and A__ = torch.cat([input_ids, next_tokens] , dim=-1) A__ = torch.cat([input_mask, next_mask] , dim=-1) A__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )['''hidden_states'''][0] A__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )['''hidden_states'''][0] # select random slice A__ = ids_tensor((1,) , output_from_past.shape[-1]).item() A__ = output_from_no_past[:, -3:, random_slice_idx].detach() A__ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3)) def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] , *UpperCAmelCase__ : List[str] , ) ->List[Any]: '''simple docstring''' A__ = BertGenerationDecoder(UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() A__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[str]: '''simple docstring''' A__ , A__ , A__ , A__ = self.prepare_config_and_inputs() A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () UpperCAmelCase__ = (BertGenerationDecoder,) if is_torch_available() else () UpperCAmelCase__ = ( {'''feature-extraction''': BertGenerationEncoder, '''text-generation''': BertGenerationDecoder} if is_torch_available() else {} ) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Dict: '''simple docstring''' A__ = BertGenerationEncoderTester(self) A__ = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[str]: '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : List[Any]) ->int: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[Any]: '''simple docstring''' A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs() A__ = '''bert''' self.model_tester.create_and_check_model(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Dict) ->Union[str, Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any: '''simple docstring''' ( ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() A__ = None self.model_tester.create_and_check_model_as_decoder( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , ) def SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*UpperCAmelCase__) @slow def SCREAMING_SNAKE_CASE ( self : Dict) ->List[Any]: '''simple docstring''' A__ = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''') self.assertIsNotNone(UpperCAmelCase__) @require_torch class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE ( self : Any) ->Union[str, Any]: '''simple docstring''' A__ = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''') A__ = torch.tensor([[101, 7_592, 1_010, 2_026, 3_899, 2_003, 10_140, 102]]) with torch.no_grad(): A__ = model(UpperCAmelCase__)[0] A__ = torch.Size([1, 8, 1_024]) self.assertEqual(output.shape , UpperCAmelCase__) A__ = torch.tensor( [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1e-4)) @require_torch class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]: '''simple docstring''' A__ = BertGenerationDecoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''') A__ = torch.tensor([[101, 7_592, 1_010, 2_026, 3_899, 2_003, 10_140, 102]]) with torch.no_grad(): A__ = model(UpperCAmelCase__)[0] A__ = torch.Size([1, 8, 50_358]) self.assertEqual(output.shape , UpperCAmelCase__) A__ = torch.tensor( [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1e-4))
87
1
def SCREAMING_SNAKE_CASE ( lowercase_ = 100 ) -> int: """simple docstring""" A__ = 0 A__ = 0 for i in range(1 , n + 1 ): sum_of_squares += i**2 sum_of_ints += i return sum_of_ints**2 - sum_of_squares if __name__ == "__main__": print(F'''{solution() = }''')
87
import argparse import json import os import time import zipfile from get_ci_error_statistics import download_artifact, get_artifacts_links from transformers import logging _lowerCamelCase : int = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Dict: """simple docstring""" A__ = set() A__ = [] def parse_line(lowercase_ ): for line in fp: if isinstance(lowercase_ , lowercase_ ): A__ = line.decode('''UTF-8''' ) if "warnings summary (final)" in line: continue # This means we are outside the body of a warning elif not line.startswith(''' ''' ): # process a single warning and move it to `selected_warnings`. if len(lowercase_ ) > 0: A__ = '''\n'''.join(lowercase_ ) # Only keep the warnings specified in `targets` if any(f""": {x}: """ in warning for x in targets ): selected_warnings.add(lowercase_ ) buffer.clear() continue else: A__ = line.strip() buffer.append(lowercase_ ) if from_gh: for filename in os.listdir(lowercase_ ): A__ = os.path.join(lowercase_ , lowercase_ ) if not os.path.isdir(lowercase_ ): # read the file if filename != "warnings.txt": continue with open(lowercase_ ) as fp: parse_line(lowercase_ ) else: try: with zipfile.ZipFile(lowercase_ ) as z: for filename in z.namelist(): if not os.path.isdir(lowercase_ ): # read the file if filename != "warnings.txt": continue with z.open(lowercase_ ) as fp: parse_line(lowercase_ ) except Exception: logger.warning( f"""{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.""" ) return selected_warnings def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str: """simple docstring""" A__ = set() A__ = [os.path.join(lowercase_ , lowercase_ ) for p in os.listdir(lowercase_ ) if (p.endswith('''.zip''' ) or from_gh)] for p in paths: selected_warnings.update(extract_warnings_from_single_artifact(lowercase_ , lowercase_ ) ) return selected_warnings if __name__ == "__main__": def SCREAMING_SNAKE_CASE ( lowercase_ ) -> int: """simple docstring""" return values.split(''',''' ) _lowerCamelCase : int = argparse.ArgumentParser() # Required parameters parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""") parser.add_argument( """--output_dir""", type=str, required=True, help="""Where to store the downloaded artifacts and other result files.""", ) parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""") # optional parameters parser.add_argument( """--targets""", default="""DeprecationWarning,UserWarning,FutureWarning""", type=list_str, help="""Comma-separated list of target warning(s) which we want to extract.""", ) parser.add_argument( """--from_gh""", action="""store_true""", help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""", ) _lowerCamelCase : List[Any] = parser.parse_args() _lowerCamelCase : List[str] = args.from_gh if from_gh: # The artifacts have to be downloaded using `actions/download-artifact@v3` pass else: os.makedirs(args.output_dir, exist_ok=True) # get download links _lowerCamelCase : Any = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) # download artifacts for idx, (name, url) in enumerate(artifacts.items()): print(name) print(url) print("""=""" * 80) download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) # extract warnings from artifacts _lowerCamelCase : Any = extract_warnings(args.output_dir, args.targets) _lowerCamelCase : Optional[Any] = sorted(selected_warnings) with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
87
1
import importlib.metadata import warnings from copy import deepcopy from packaging import version from ..utils import logging from .import_utils import is_accelerate_available, is_bitsandbytes_available if is_bitsandbytes_available(): import bitsandbytes as bnb import torch import torch.nn as nn from ..pytorch_utils import ConvaD if is_accelerate_available(): from accelerate import init_empty_weights from accelerate.utils import find_tied_parameters _lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_=None , lowercase_=None ) -> Dict: """simple docstring""" if "." in tensor_name: A__ = tensor_name.split('''.''' ) for split in splits[:-1]: A__ = getattr(lowercase_ , lowercase_ ) if new_module is None: raise ValueError(f"""{module} has no attribute {split}.""" ) A__ = new_module A__ = splits[-1] if tensor_name not in module._parameters and tensor_name not in module._buffers: raise ValueError(f"""{module} does not have a parameter or a buffer named {tensor_name}.""" ) A__ = tensor_name in module._buffers A__ = getattr(lowercase_ , lowercase_ ) if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None: raise ValueError(f"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" ) A__ = False A__ = False if is_buffer or not is_bitsandbytes_available(): A__ = False A__ = False else: A__ = hasattr(bnb.nn , '''Params4bit''' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit ) A__ = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams ) if is_abit or is_abit: A__ = module._parameters[tensor_name] if param.device.type != "cuda": if value is None: A__ = old_value.to(lowercase_ ) elif isinstance(lowercase_ , torch.Tensor ): A__ = value.to('''cpu''' ) if value.dtype == torch.inta: A__ = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse( '''0.37.2''' ) if not is_abit_serializable: raise ValueError( '''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. ''' '''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' ) else: A__ = torch.tensor(lowercase_ , device='''cpu''' ) # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. if issubclass(module.source_cls , lowercase_ ) and fpaa_statistics is None: A__ = new_value.T A__ = old_value.__dict__ if is_abit: A__ = bnb.nn.IntaParams(lowercase_ , requires_grad=lowercase_ , **lowercase_ ).to(lowercase_ ) elif is_abit: A__ = bnb.nn.Paramsabit(lowercase_ , requires_grad=lowercase_ , **lowercase_ ).to(lowercase_ ) A__ = new_value if fpaa_statistics is not None: setattr(module.weight , '''SCB''' , fpaa_statistics.to(lowercase_ ) ) else: if value is None: A__ = old_value.to(lowercase_ ) elif isinstance(lowercase_ , torch.Tensor ): A__ = value.to(lowercase_ ) else: A__ = torch.tensor(lowercase_ , device=lowercase_ ) if is_buffer: A__ = new_value else: A__ = nn.Parameter(lowercase_ , requires_grad=old_value.requires_grad ) A__ = new_value def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=False ) -> Dict: """simple docstring""" for name, module in model.named_children(): if current_key_name is None: A__ = [] current_key_name.append(lowercase_ ) if (isinstance(lowercase_ , nn.Linear ) or isinstance(lowercase_ , lowercase_ )) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` if not any(key in '''.'''.join(lowercase_ ) for key in modules_to_not_convert ): with init_empty_weights(): if isinstance(lowercase_ , lowercase_ ): A__ , A__ = module.weight.shape else: A__ = module.in_features A__ = module.out_features if quantization_config.quantization_method() == "llm_int8": A__ = bnb.nn.LinearabitLt( lowercase_ , lowercase_ , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , ) A__ = True else: if ( quantization_config.llm_inta_skip_modules is not None and name in quantization_config.llm_inta_skip_modules ): pass else: A__ = bnb.nn.Linearabit( lowercase_ , lowercase_ , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , ) A__ = True # Store the module class in case we need to transpose the weight later A__ = type(lowercase_ ) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(lowercase_ ) if len(list(module.children() ) ) > 0: A__ , A__ = _replace_with_bnb_linear( lowercase_ , lowercase_ , lowercase_ , lowercase_ , has_been_replaced=lowercase_ , ) # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None ) -> Tuple: """simple docstring""" A__ = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert A__ , A__ = _replace_with_bnb_linear( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) if not has_been_replaced: logger.warning( '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.''' ''' Please double check your model architecture, or submit an issue on github if you think this is''' ''' a bug.''' ) return model def SCREAMING_SNAKE_CASE ( *lowercase_ , **lowercase_ ) -> Dict: """simple docstring""" warnings.warn( '''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' , lowercase_ , ) return replace_with_bnb_linear(*lowercase_ , **lowercase_ ) def SCREAMING_SNAKE_CASE ( *lowercase_ , **lowercase_ ) -> Optional[Any]: """simple docstring""" warnings.warn( '''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' , lowercase_ , ) return set_module_quantized_tensor_to_device(*lowercase_ , **lowercase_ ) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]: """simple docstring""" A__ = deepcopy(lowercase_ ) # this has 0 cost since it is done inside `init_empty_weights` context manager` tied_model.tie_weights() A__ = find_tied_parameters(lowercase_ ) # For compatibility with Accelerate < 0.18 if isinstance(lowercase_ , lowercase_ ): A__ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: A__ = sum(lowercase_ , [] ) A__ = len(lowercase_ ) > 0 # Check if it is a base model A__ = not hasattr(lowercase_ , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head A__ = list(model.named_children() ) A__ = [list_modules[-1][0]] # add last module together with tied weights A__ = set(lowercase_ ) - set(lowercase_ ) A__ = list(set(lowercase_ ) ) + list(lowercase_ ) # remove ".weight" from the keys A__ = ['''.weight''', '''.bias'''] A__ = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: A__ = name.replace(lowercase_ , '''''' ) filtered_module_names.append(lowercase_ ) return filtered_module_names
87
class UpperCamelCase_ : # Public class to implement a graph '''simple docstring''' def __init__( self : str , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : list[list[bool]]) ->None: '''simple docstring''' A__ = row A__ = col A__ = graph def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : list[list[bool]]) ->bool: '''simple docstring''' return ( 0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and self.graph[i][j] ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : list[list[bool]]) ->None: '''simple docstring''' A__ = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order A__ = [-1, 0, 1, -1, 1, -1, 0, 1] A__ = True # Make those cells visited for k in range(8): if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , UpperCAmelCase__): self.diffs(i + row_nbr[k] , j + col_nbr[k] , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->int: # And finally, count all islands. '''simple docstring''' A__ = [[False for j in range(self.COL)] for i in range(self.ROW)] A__ = 0 for i in range(self.ROW): for j in range(self.COL): if visited[i][j] is False and self.graph[i][j] == 1: self.diffs(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__) count += 1 return count
87
1
import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class UpperCamelCase_ ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = '''ssube/stable-diffusion-x4-upscaler-onnx''' def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : Optional[int]=0) ->Union[str, Any]: '''simple docstring''' A__ = floats_tensor((1, 3, 128, 128) , rng=random.Random(UpperCAmelCase__)) A__ = torch.manual_seed(UpperCAmelCase__) A__ = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def SCREAMING_SNAKE_CASE ( self : Any) ->List[Any]: '''simple docstring''' A__ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''') pipe.set_progress_bar_config(disable=UpperCAmelCase__) A__ = self.get_dummy_inputs() A__ = pipe(**UpperCAmelCase__).images A__ = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 512, 512, 3) A__ = np.array( [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]) assert np.abs(image_slice - expected_slice).max() < 1e-1 def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[Any]: '''simple docstring''' A__ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''') A__ = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=UpperCAmelCase__) pipe.set_progress_bar_config(disable=UpperCAmelCase__) A__ = self.get_dummy_inputs() A__ = pipe(**UpperCAmelCase__).images A__ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) A__ = np.array( [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[Any]: '''simple docstring''' A__ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''') A__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=UpperCAmelCase__) A__ = self.get_dummy_inputs() A__ = pipe(**UpperCAmelCase__).images A__ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) A__ = np.array( [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[Any]: '''simple docstring''' A__ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''') A__ = EulerDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=UpperCAmelCase__) A__ = self.get_dummy_inputs() A__ = pipe(**UpperCAmelCase__).images A__ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) A__ = np.array( [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def SCREAMING_SNAKE_CASE ( self : str) ->Optional[Any]: '''simple docstring''' A__ = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''') A__ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=UpperCAmelCase__) A__ = self.get_dummy_inputs() A__ = pipe(**UpperCAmelCase__).images A__ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) A__ = np.array( [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' @property def SCREAMING_SNAKE_CASE ( self : int) ->str: '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def SCREAMING_SNAKE_CASE ( self : Tuple) ->Dict: '''simple docstring''' A__ = ort.SessionOptions() A__ = False return options def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: '''simple docstring''' A__ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''') A__ = init_image.resize((128, 128)) # using the PNDM scheduler by default A__ = OnnxStableDiffusionUpscalePipeline.from_pretrained( '''ssube/stable-diffusion-x4-upscaler-onnx''' , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=UpperCAmelCase__) A__ = '''A fantasy landscape, trending on artstation''' A__ = torch.manual_seed(0) A__ = pipe( prompt=UpperCAmelCase__ , image=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=10 , generator=UpperCAmelCase__ , output_type='''np''' , ) A__ = output.images A__ = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) A__ = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972]) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2 def SCREAMING_SNAKE_CASE ( self : Tuple) ->Dict: '''simple docstring''' A__ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''') A__ = init_image.resize((128, 128)) A__ = LMSDiscreteScheduler.from_pretrained( '''ssube/stable-diffusion-x4-upscaler-onnx''' , subfolder='''scheduler''') A__ = OnnxStableDiffusionUpscalePipeline.from_pretrained( '''ssube/stable-diffusion-x4-upscaler-onnx''' , scheduler=UpperCAmelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=UpperCAmelCase__) A__ = '''A fantasy landscape, trending on artstation''' A__ = torch.manual_seed(0) A__ = pipe( prompt=UpperCAmelCase__ , image=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=20 , generator=UpperCAmelCase__ , output_type='''np''' , ) A__ = output.images A__ = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) A__ = np.array( [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
87
from __future__ import annotations import requests _lowerCamelCase : str = set( """approved_at_utc approved_by author_flair_background_color author_flair_css_class author_flair_richtext author_flair_template_id author_fullname author_premium can_mod_post category clicked content_categories created_utc downs edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta is_original_content is_reddit_media_domain is_video link_flair_css_class link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title name permalink pwls quarantine saved score secure_media secure_media_embed selftext subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type total_awards_received ups upvote_ratio url user_reports""".split() ) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ = 1 , lowercase_ = "new" , lowercase_ = None ) -> dict: """simple docstring""" A__ = wanted_data or [] if invalid_search_terms := ", ".join(sorted(set(lowercase_ ) - valid_terms ) ): A__ = f"""Invalid search term: {invalid_search_terms}""" raise ValueError(lowercase_ ) A__ = requests.get( f"""https://reddit.com/r/{subreddit}/{age}.json?limit={limit}""" , headers={'''User-agent''': '''A random string'''} , ) if response.status_code == 429: raise requests.HTTPError A__ = response.json() if not wanted_data: return {id_: data["data"]["children"][id_] for id_ in range(lowercase_ )} A__ = {} for id_ in range(lowercase_ ): A__ = { item: data['''data''']['''children'''][id_]['''data'''][item] for item in wanted_data } return data_dict if __name__ == "__main__": # If you get Error 429, that means you are rate limited.Try after some time print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
87
1
import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() _lowerCamelCase : Tuple = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Dict: """simple docstring""" A__ = OrderedDict() for key, value in state_dict.items(): if key.startswith('''module.encoder''' ): A__ = key.replace('''module.encoder''' , '''glpn.encoder''' ) if key.startswith('''module.decoder''' ): A__ = key.replace('''module.decoder''' , '''decoder.stages''' ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 A__ = key[key.find('''patch_embed''' ) + len('''patch_embed''' )] A__ = key.replace(f"""patch_embed{idx}""" , f"""patch_embeddings.{int(lowercase_ )-1}""" ) if "norm" in key: A__ = key.replace('''norm''' , '''layer_norm''' ) if "glpn.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 A__ = key[key.find('''glpn.encoder.layer_norm''' ) + len('''glpn.encoder.layer_norm''' )] A__ = key.replace(f"""layer_norm{idx}""" , f"""layer_norm.{int(lowercase_ )-1}""" ) if "layer_norm1" in key: A__ = key.replace('''layer_norm1''' , '''layer_norm_1''' ) if "layer_norm2" in key: A__ = key.replace('''layer_norm2''' , '''layer_norm_2''' ) if "block" in key: # replace for example block1 by block.0 A__ = key[key.find('''block''' ) + len('''block''' )] A__ = key.replace(f"""block{idx}""" , f"""block.{int(lowercase_ )-1}""" ) if "attn.q" in key: A__ = key.replace('''attn.q''' , '''attention.self.query''' ) if "attn.proj" in key: A__ = key.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in key: A__ = key.replace('''attn''' , '''attention.self''' ) if "fc1" in key: A__ = key.replace('''fc1''' , '''dense1''' ) if "fc2" in key: A__ = key.replace('''fc2''' , '''dense2''' ) if "linear_pred" in key: A__ = key.replace('''linear_pred''' , '''classifier''' ) if "linear_fuse" in key: A__ = key.replace('''linear_fuse.conv''' , '''linear_fuse''' ) A__ = key.replace('''linear_fuse.bn''' , '''batch_norm''' ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 A__ = key[key.find('''linear_c''' ) + len('''linear_c''' )] A__ = key.replace(f"""linear_c{idx}""" , f"""linear_c.{int(lowercase_ )-1}""" ) if "bot_conv" in key: A__ = key.replace('''bot_conv''' , '''0.convolution''' ) if "skip_conv1" in key: A__ = key.replace('''skip_conv1''' , '''1.convolution''' ) if "skip_conv2" in key: A__ = key.replace('''skip_conv2''' , '''2.convolution''' ) if "fusion1" in key: A__ = key.replace('''fusion1''' , '''1.fusion''' ) if "fusion2" in key: A__ = key.replace('''fusion2''' , '''2.fusion''' ) if "fusion3" in key: A__ = key.replace('''fusion3''' , '''3.fusion''' ) if "fusion" in key and "conv" in key: A__ = key.replace('''conv''' , '''convolutional_layer''' ) if key.startswith('''module.last_layer_depth''' ): A__ = key.replace('''module.last_layer_depth''' , '''head.head''' ) A__ = value return new_state_dict def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Union[str, Any]: """simple docstring""" for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) A__ = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""" ) A__ = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""" ) # next, add keys and values (in that order) to the state dict A__ = kv_weight[ : config.hidden_sizes[i], : ] A__ = kv_bias[: config.hidden_sizes[i]] A__ = kv_weight[ config.hidden_sizes[i] :, : ] A__ = kv_bias[config.hidden_sizes[i] :] def SCREAMING_SNAKE_CASE ( ) -> Tuple: """simple docstring""" A__ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' A__ = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw ) return image @torch.no_grad() def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_=False , lowercase_=None ) -> List[str]: """simple docstring""" A__ = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] ) # load image processor (only resize + rescale) A__ = GLPNImageProcessor() # prepare image A__ = prepare_img() A__ = image_processor(images=lowercase_ , return_tensors='''pt''' ).pixel_values logger.info('''Converting model...''' ) # load original state dict A__ = torch.load(lowercase_ , map_location=torch.device('''cpu''' ) ) # rename keys A__ = rename_keys(lowercase_ ) # key and value matrices need special treatment read_in_k_v(lowercase_ , lowercase_ ) # create HuggingFace model and load state dict A__ = GLPNForDepthEstimation(lowercase_ ) model.load_state_dict(lowercase_ ) model.eval() # forward pass A__ = model(lowercase_ ) A__ = outputs.predicted_depth # verify output if model_name is not None: if "nyu" in model_name: A__ = torch.tensor( [[4.41_47, 4.08_73, 4.06_73], [3.78_90, 3.28_81, 3.15_25], [3.76_74, 3.54_23, 3.49_13]] ) elif "kitti" in model_name: A__ = torch.tensor( [[3.42_91, 2.78_65, 2.51_51], [3.28_41, 2.70_21, 2.35_02], [3.11_47, 2.46_25, 2.24_81]] ) else: raise ValueError(f"""Unknown model name: {model_name}""" ) A__ = torch.Size([1, 480, 640] ) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3] , lowercase_ , atol=1E-4 ) print('''Looks ok!''' ) # finally, push to hub if required if push_to_hub: logger.info('''Pushing model and image processor to the hub...''' ) model.push_to_hub( repo_path_or_name=Path(lowercase_ , lowercase_ ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=lowercase_ , ) image_processor.push_to_hub( repo_path_or_name=Path(lowercase_ , lowercase_ ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=lowercase_ , ) if __name__ == "__main__": _lowerCamelCase : List[Any] = argparse.ArgumentParser() parser.add_argument( """--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file).""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub.""" ) parser.add_argument( """--model_name""", default="""glpn-kitti""", type=str, help="""Name of the model in case you're pushing to the hub.""", ) _lowerCamelCase : List[Any] = parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
87
import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = JukeboxTokenizer UpperCAmelCase__ = { '''artist''': '''Zac Brown Band''', '''genres''': '''Country''', '''lyrics''': '''I met a traveller from an antique land, Who said "Two vast and trunkless legs of stone Stand in the desert. . . . Near them, on the sand, Half sunk a shattered visage lies, whose frown, And wrinkled lip, and sneer of cold command, Tell that its sculptor well those passions read Which yet survive, stamped on these lifeless things, The hand that mocked them, and the heart that fed; And on the pedestal, these words appear: My name is Ozymandias, King of Kings; Look on my Works, ye Mighty, and despair! Nothing beside remains. Round the decay Of that colossal Wreck, boundless and bare The lone and level sands stretch far away ''', } @require_torch def SCREAMING_SNAKE_CASE ( self : Dict) ->int: '''simple docstring''' import torch A__ = JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''') A__ = tokenizer(**self.metas)['''input_ids'''] # fmt: off A__ = [ torch.tensor([[ 0, 0, 0, 7_169, 507, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67, 78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]]), torch.tensor([[0, 0, 0, 1_069, 11]]), torch.tensor([[0, 0, 0, 1_069, 11]]), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0])) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1])) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2])) @require_torch def SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[int]: '''simple docstring''' import torch A__ = JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''') A__ = tokenizer(**self.metas)['''input_ids'''] # fmt: off A__ = [ torch.tensor([[ 0, 0, 0, 1_069, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]]), torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]]), torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]]), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0])) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1])) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2]))
87
1
# Author: OMKAR PATHAK, Nwachukwu Chidiebere # Use a Python dictionary to construct the graph. from __future__ import annotations from pprint import pformat from typing import Generic, TypeVar _lowerCamelCase : Optional[Any] = TypeVar("""T""") class UpperCamelCase_ ( Generic[T] ): '''simple docstring''' def __init__( self : Union[str, Any] , UpperCAmelCase__ : bool = True) ->None: '''simple docstring''' A__ = {} # dictionary of lists A__ = directed def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : T , UpperCAmelCase__ : T) ->GraphAdjacencyList[T]: '''simple docstring''' if not self.directed: # For undirected graphs # if both source vertex and destination vertex are both present in the # adjacency list, add destination vertex to source vertex list of adjacent # vertices and add source vertex to destination vertex list of adjacent # vertices. if source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(UpperCAmelCase__) self.adj_list[destination_vertex].append(UpperCAmelCase__) # if only source vertex is present in adjacency list, add destination vertex # to source vertex list of adjacent vertices, then create a new vertex with # destination vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(UpperCAmelCase__) A__ = [source_vertex] # if only destination vertex is present in adjacency list, add source vertex # to destination vertex list of adjacent vertices, then create a new vertex # with source vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. elif destination_vertex in self.adj_list: self.adj_list[destination_vertex].append(UpperCAmelCase__) A__ = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and assign a list # containing the destination vertex as it's first adjacent vertex also # create a new vertex with destination vertex as key and assign a list # containing the source vertex as it's first adjacent vertex. else: A__ = [destination_vertex] A__ = [source_vertex] else: # For directed graphs # if both source vertex and destination vertex are present in adjacency # list, add destination vertex to source vertex list of adjacent vertices. if source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(UpperCAmelCase__) # if only source vertex is present in adjacency list, add destination # vertex to source vertex list of adjacent vertices and create a new vertex # with destination vertex as key, which has no adjacent vertex elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(UpperCAmelCase__) A__ = [] # if only destination vertex is present in adjacency list, create a new # vertex with source vertex as key and assign a list containing destination # vertex as first adjacent vertex elif destination_vertex in self.adj_list: A__ = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and a list containing # destination vertex as it's first adjacent vertex. Then create a new vertex # with destination vertex as key, which has no adjacent vertex else: A__ = [destination_vertex] A__ = [] return self def __repr__( self : Union[str, Any]) ->str: '''simple docstring''' return pformat(self.adj_list)
87
from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCamelCase : str = logging.get_logger(__name__) _lowerCamelCase : List[str] = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""} class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''openai-gpt''' UpperCAmelCase__ = { '''max_position_embeddings''': '''n_positions''', '''hidden_size''': '''n_embd''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : Union[str, Any] , UpperCAmelCase__ : Dict=40_478 , UpperCAmelCase__ : str=512 , UpperCAmelCase__ : Union[str, Any]=768 , UpperCAmelCase__ : Optional[Any]=12 , UpperCAmelCase__ : Any=12 , UpperCAmelCase__ : Optional[Any]="gelu" , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : List[str]=1e-5 , UpperCAmelCase__ : int=0.02 , UpperCAmelCase__ : Any="cls_index" , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Optional[Any]=0.1 , **UpperCAmelCase__ : Dict , ) ->Any: '''simple docstring''' A__ = vocab_size A__ = n_positions A__ = n_embd A__ = n_layer A__ = n_head A__ = afn A__ = resid_pdrop A__ = embd_pdrop A__ = attn_pdrop A__ = layer_norm_epsilon A__ = initializer_range A__ = summary_type A__ = summary_use_proj A__ = summary_activation A__ = summary_first_dropout A__ = summary_proj_to_labels super().__init__(**UpperCAmelCase__)
87
1
import numpy as np import pandas as pd from sklearn.preprocessing import MinMaxScaler from tensorflow.keras.layers import LSTM, Dense from tensorflow.keras.models import Sequential if __name__ == "__main__": _lowerCamelCase : str = pd.read_csv("""sample_data.csv""", header=None) _lowerCamelCase : List[Any] = df.shape[:1][0] # If you're using some other dataset input the target column _lowerCamelCase : Tuple = df.iloc[:, 1:2] _lowerCamelCase : Optional[Any] = actual_data.values.reshape(len_data, 1) _lowerCamelCase : List[str] = MinMaxScaler().fit_transform(actual_data) _lowerCamelCase : str = 10 _lowerCamelCase : Optional[int] = 5 _lowerCamelCase : Any = 20 _lowerCamelCase : Optional[int] = len_data - periods * look_back _lowerCamelCase : Optional[Any] = actual_data[:division] _lowerCamelCase : Optional[Any] = actual_data[division - look_back :] _lowerCamelCase , _lowerCamelCase : Optional[int] = [], [] _lowerCamelCase , _lowerCamelCase : List[Any] = [], [] for i in range(0, len(train_data) - forward_days - look_back + 1): train_x.append(train_data[i : i + look_back]) train_y.append(train_data[i + look_back : i + look_back + forward_days]) for i in range(0, len(test_data) - forward_days - look_back + 1): test_x.append(test_data[i : i + look_back]) test_y.append(test_data[i + look_back : i + look_back + forward_days]) _lowerCamelCase : Optional[int] = np.array(train_x) _lowerCamelCase : str = np.array(test_x) _lowerCamelCase : Tuple = np.array([list(i.ravel()) for i in train_y]) _lowerCamelCase : Tuple = np.array([list(i.ravel()) for i in test_y]) _lowerCamelCase : Any = Sequential() model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True)) model.add(LSTM(64, input_shape=(128, 1))) model.add(Dense(forward_days)) model.compile(loss="""mean_squared_error""", optimizer="""adam""") _lowerCamelCase : int = model.fit( x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4 ) _lowerCamelCase : Optional[int] = model.predict(x_test)
87
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> int: """simple docstring""" return int((input_a, input_a).count(1 ) != 0 ) def SCREAMING_SNAKE_CASE ( ) -> None: """simple docstring""" assert or_gate(0 , 0 ) == 0 assert or_gate(0 , 1 ) == 1 assert or_gate(1 , 0 ) == 1 assert or_gate(1 , 1 ) == 1 if __name__ == "__main__": print(or_gate(0, 1)) print(or_gate(1, 0)) print(or_gate(0, 0)) print(or_gate(1, 1))
87
1
import unittest from transformers import AutoTokenizer, FalconConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, ) class UpperCamelCase_ : '''simple docstring''' def __init__( self : List[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Tuple=3 , UpperCAmelCase__ : List[Any]=7 , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Any=99 , UpperCAmelCase__ : Optional[int]=32 , UpperCAmelCase__ : str=5 , UpperCAmelCase__ : Dict=4 , UpperCAmelCase__ : str=37 , UpperCAmelCase__ : int="gelu" , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Any=512 , UpperCAmelCase__ : Union[str, Any]=16 , UpperCAmelCase__ : Tuple=2 , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : List[Any]=3 , UpperCAmelCase__ : Tuple=4 , UpperCAmelCase__ : Union[str, Any]=None , ) ->Optional[Any]: '''simple docstring''' A__ = parent A__ = batch_size A__ = seq_length A__ = is_training A__ = use_input_mask A__ = use_token_type_ids A__ = use_labels A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = type_vocab_size A__ = type_sequence_label_size A__ = initializer_range A__ = num_labels A__ = num_choices A__ = scope def SCREAMING_SNAKE_CASE ( self : List[str]) ->Tuple: '''simple docstring''' A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) A__ = None if self.use_input_mask: A__ = random_attention_mask([self.batch_size, self.seq_length]) A__ = None A__ = None A__ = None A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size) A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) A__ = ids_tensor([self.batch_size] , self.num_choices) A__ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[int]: '''simple docstring''' return FalconConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=UpperCAmelCase__ , ) def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str]) ->List[str]: '''simple docstring''' A__ = FalconModel(config=UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() A__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__) A__ = model(UpperCAmelCase__) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[int] , ) ->Optional[int]: '''simple docstring''' A__ = True A__ = FalconModel(UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() A__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , ) A__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , ) A__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] , ) ->int: '''simple docstring''' A__ = FalconForCausalLM(config=UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() A__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , ) ->Optional[int]: '''simple docstring''' A__ = True A__ = True A__ = FalconForCausalLM(config=UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() # first forward pass A__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ , ) A__ = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A__ = ids_tensor((self.batch_size, 3) , config.vocab_size) A__ = ids_tensor((self.batch_size, 3) , vocab_size=2) # append to next input_ids and A__ = torch.cat([input_ids, next_tokens] , dim=-1) A__ = torch.cat([input_mask, next_mask] , dim=-1) A__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )['''hidden_states'''][0] A__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )['''hidden_states'''][0] # select random slice A__ = ids_tensor((1,) , output_from_past.shape[-1]).item() A__ = output_from_no_past[:, -3:, random_slice_idx].detach() A__ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3)) def SCREAMING_SNAKE_CASE ( self : int) ->List[str]: '''simple docstring''' A__ = self.prepare_config_and_inputs() ( ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ) = config_and_inputs A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = ( ( FalconModel, FalconForCausalLM, FalconForSequenceClassification, FalconForTokenClassification, FalconForQuestionAnswering, ) if is_torch_available() else () ) UpperCAmelCase__ = (FalconForCausalLM,) if is_torch_available() else () UpperCAmelCase__ = ( { '''feature-extraction''': FalconModel, '''text-classification''': FalconForSequenceClassification, '''text-generation''': FalconForCausalLM, '''question-answering''': FalconForQuestionAnswering, '''token-classification''': FalconForTokenClassification, '''zero-shot''': FalconForSequenceClassification, } if is_torch_available() else {} ) UpperCAmelCase__ = False UpperCAmelCase__ = False def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Tuple: '''simple docstring''' A__ = FalconModelTester(self) A__ = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37) def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]: '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : List[str]) ->Dict: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Union[str, Any]: '''simple docstring''' A__ , *A__ = self.model_tester.prepare_config_and_inputs() for alibi in [True, False]: A__ = alibi self.model_tester.create_and_check_model(UpperCAmelCase__ , *UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : int) ->Optional[Any]: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = 3 A__ = input_dict['''input_ids'''] A__ = input_ids.ne(1).to(UpperCAmelCase__) A__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size) A__ = FalconForSequenceClassification(UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() A__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = 3 A__ = '''single_label_classification''' A__ = input_dict['''input_ids'''] A__ = input_ids.ne(1).to(UpperCAmelCase__) A__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size) A__ = FalconForSequenceClassification(UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() A__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) def SCREAMING_SNAKE_CASE ( self : Tuple) ->Dict: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = input_dict['''input_ids'''] A__ = FalconForCausalLM(UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() A__ = model(UpperCAmelCase__ , use_cache=UpperCAmelCase__) A__ = input_ids.shape[0] A__ = model._convert_to_rw_cache(result.past_key_values) A__ = model._convert_cache_to_standard_format(UpperCAmelCase__ , UpperCAmelCase__) for layer in range(len(UpperCAmelCase__)): for tensor_idx in range(2): self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3) self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4) self.assertTrue( torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx])) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[str]: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = 3 A__ = '''multi_label_classification''' A__ = input_dict['''input_ids'''] A__ = input_ids.ne(1).to(UpperCAmelCase__) A__ = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float) A__ = FalconForSequenceClassification(UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() A__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) def SCREAMING_SNAKE_CASE ( self : Tuple) ->List[str]: '''simple docstring''' for model_class in self.all_generative_model_classes: A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() # If it doesn't support cache, pass the test if not hasattr(UpperCAmelCase__ , '''use_cache'''): return A__ = model_class(UpperCAmelCase__).to(UpperCAmelCase__) if "use_cache" not in inputs: A__ = True A__ = model(**UpperCAmelCase__) # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format) if "past_key_values" not in outputs: return A__ = ( getattr(UpperCAmelCase__ , '''decoder_layers''' , UpperCAmelCase__) or getattr(UpperCAmelCase__ , '''num_decoder_layers''' , UpperCAmelCase__) or config.num_hidden_layers ) A__ = getattr(UpperCAmelCase__ , '''num_kv_heads''' , config.num_attention_heads) A__ = getattr(UpperCAmelCase__ , '''d_model''' , config.hidden_size) A__ = embed_dim // num_attention_heads A__ = outputs['''past_key_values'''] self.assertEqual(len(UpperCAmelCase__) , UpperCAmelCase__) A__ , A__ = inputs['''input_ids'''].shape for i in range(UpperCAmelCase__): if config.new_decoder_architecture: A__ = config.num_attention_heads elif config.multi_query: A__ = 1 self.assertEqual(len(past_kv[0]) , 2) # K V for the decoder = 2 self.assertEqual( past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim)) self.assertEqual( past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim)) @require_torch class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]: '''simple docstring''' A__ = AutoTokenizer.from_pretrained('''Rocketknight1/falcon-rw-1b''') A__ = FalconForCausalLM.from_pretrained('''Rocketknight1/falcon-rw-1b''') model.eval() model.to(UpperCAmelCase__) A__ = tokenizer('''My favorite food is''' , return_tensors='''pt''').to(UpperCAmelCase__) A__ = ( '''My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.''' ) A__ = model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=19) A__ = tokenizer.batch_decode(UpperCAmelCase__)[0] self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__) @slow def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]: '''simple docstring''' for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]: A__ = AutoTokenizer.from_pretrained(UpperCAmelCase__) A__ = FalconForCausalLM.from_pretrained(UpperCAmelCase__) model.eval() model.to(UpperCAmelCase__) A__ = tokenizer('''My favorite food is''' , return_tensors='''pt''').to(UpperCAmelCase__) # We just test that these run without errors - the models are randomly initialized # and so the actual text outputs will be garbage model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=4) model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=4) model.generate(**UpperCAmelCase__ , num_beams=2 , max_new_tokens=4) @slow def SCREAMING_SNAKE_CASE ( self : Dict) ->int: '''simple docstring''' with torch.no_grad(): for repo in [ "Rocketknight1/falcon-rw-1b", "Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b", ]: A__ = AutoTokenizer.from_pretrained(UpperCAmelCase__) A__ = FalconForCausalLM.from_pretrained(UpperCAmelCase__) model.eval() model.to(device=UpperCAmelCase__) A__ = tokenizer('''My favorite food is''' , return_tensors='''pt''').to(UpperCAmelCase__) # Test results are the same with and without cache A__ = model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=20 , use_cache=UpperCAmelCase__) A__ = model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=20 , use_cache=UpperCAmelCase__) self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
87
import importlib.metadata import warnings from copy import deepcopy from packaging import version from ..utils import logging from .import_utils import is_accelerate_available, is_bitsandbytes_available if is_bitsandbytes_available(): import bitsandbytes as bnb import torch import torch.nn as nn from ..pytorch_utils import ConvaD if is_accelerate_available(): from accelerate import init_empty_weights from accelerate.utils import find_tied_parameters _lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_=None , lowercase_=None ) -> Dict: """simple docstring""" if "." in tensor_name: A__ = tensor_name.split('''.''' ) for split in splits[:-1]: A__ = getattr(lowercase_ , lowercase_ ) if new_module is None: raise ValueError(f"""{module} has no attribute {split}.""" ) A__ = new_module A__ = splits[-1] if tensor_name not in module._parameters and tensor_name not in module._buffers: raise ValueError(f"""{module} does not have a parameter or a buffer named {tensor_name}.""" ) A__ = tensor_name in module._buffers A__ = getattr(lowercase_ , lowercase_ ) if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None: raise ValueError(f"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" ) A__ = False A__ = False if is_buffer or not is_bitsandbytes_available(): A__ = False A__ = False else: A__ = hasattr(bnb.nn , '''Params4bit''' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit ) A__ = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams ) if is_abit or is_abit: A__ = module._parameters[tensor_name] if param.device.type != "cuda": if value is None: A__ = old_value.to(lowercase_ ) elif isinstance(lowercase_ , torch.Tensor ): A__ = value.to('''cpu''' ) if value.dtype == torch.inta: A__ = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse( '''0.37.2''' ) if not is_abit_serializable: raise ValueError( '''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. ''' '''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' ) else: A__ = torch.tensor(lowercase_ , device='''cpu''' ) # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. if issubclass(module.source_cls , lowercase_ ) and fpaa_statistics is None: A__ = new_value.T A__ = old_value.__dict__ if is_abit: A__ = bnb.nn.IntaParams(lowercase_ , requires_grad=lowercase_ , **lowercase_ ).to(lowercase_ ) elif is_abit: A__ = bnb.nn.Paramsabit(lowercase_ , requires_grad=lowercase_ , **lowercase_ ).to(lowercase_ ) A__ = new_value if fpaa_statistics is not None: setattr(module.weight , '''SCB''' , fpaa_statistics.to(lowercase_ ) ) else: if value is None: A__ = old_value.to(lowercase_ ) elif isinstance(lowercase_ , torch.Tensor ): A__ = value.to(lowercase_ ) else: A__ = torch.tensor(lowercase_ , device=lowercase_ ) if is_buffer: A__ = new_value else: A__ = nn.Parameter(lowercase_ , requires_grad=old_value.requires_grad ) A__ = new_value def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=False ) -> Dict: """simple docstring""" for name, module in model.named_children(): if current_key_name is None: A__ = [] current_key_name.append(lowercase_ ) if (isinstance(lowercase_ , nn.Linear ) or isinstance(lowercase_ , lowercase_ )) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` if not any(key in '''.'''.join(lowercase_ ) for key in modules_to_not_convert ): with init_empty_weights(): if isinstance(lowercase_ , lowercase_ ): A__ , A__ = module.weight.shape else: A__ = module.in_features A__ = module.out_features if quantization_config.quantization_method() == "llm_int8": A__ = bnb.nn.LinearabitLt( lowercase_ , lowercase_ , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , ) A__ = True else: if ( quantization_config.llm_inta_skip_modules is not None and name in quantization_config.llm_inta_skip_modules ): pass else: A__ = bnb.nn.Linearabit( lowercase_ , lowercase_ , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , ) A__ = True # Store the module class in case we need to transpose the weight later A__ = type(lowercase_ ) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(lowercase_ ) if len(list(module.children() ) ) > 0: A__ , A__ = _replace_with_bnb_linear( lowercase_ , lowercase_ , lowercase_ , lowercase_ , has_been_replaced=lowercase_ , ) # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None ) -> Tuple: """simple docstring""" A__ = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert A__ , A__ = _replace_with_bnb_linear( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) if not has_been_replaced: logger.warning( '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.''' ''' Please double check your model architecture, or submit an issue on github if you think this is''' ''' a bug.''' ) return model def SCREAMING_SNAKE_CASE ( *lowercase_ , **lowercase_ ) -> Dict: """simple docstring""" warnings.warn( '''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' , lowercase_ , ) return replace_with_bnb_linear(*lowercase_ , **lowercase_ ) def SCREAMING_SNAKE_CASE ( *lowercase_ , **lowercase_ ) -> Optional[Any]: """simple docstring""" warnings.warn( '''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' , lowercase_ , ) return set_module_quantized_tensor_to_device(*lowercase_ , **lowercase_ ) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]: """simple docstring""" A__ = deepcopy(lowercase_ ) # this has 0 cost since it is done inside `init_empty_weights` context manager` tied_model.tie_weights() A__ = find_tied_parameters(lowercase_ ) # For compatibility with Accelerate < 0.18 if isinstance(lowercase_ , lowercase_ ): A__ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: A__ = sum(lowercase_ , [] ) A__ = len(lowercase_ ) > 0 # Check if it is a base model A__ = not hasattr(lowercase_ , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head A__ = list(model.named_children() ) A__ = [list_modules[-1][0]] # add last module together with tied weights A__ = set(lowercase_ ) - set(lowercase_ ) A__ = list(set(lowercase_ ) ) + list(lowercase_ ) # remove ".weight" from the keys A__ = ['''.weight''', '''.bias'''] A__ = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: A__ = name.replace(lowercase_ , '''''' ) filtered_module_names.append(lowercase_ ) return filtered_module_names
87
1
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.utils import ComputeEnvironment from .cluster import get_cluster_input from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401 from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401 from .sagemaker import get_sagemaker_input _lowerCamelCase : Optional[Any] = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine""" def SCREAMING_SNAKE_CASE ( ) -> Dict: """simple docstring""" A__ = _ask_options( '''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , ) if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: A__ = get_sagemaker_input() else: A__ = get_cluster_input() return config def SCREAMING_SNAKE_CASE ( lowercase_=None ) -> List[Any]: """simple docstring""" if subparsers is not None: A__ = subparsers.add_parser('''config''' , description=lowercase_ ) else: A__ = argparse.ArgumentParser('''Accelerate config command''' , description=lowercase_ ) parser.add_argument( '''--config_file''' , default=lowercase_ , help=( '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache ''' '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ''' '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ''' '''with \'huggingface\'.''' ) , ) if subparsers is not None: parser.set_defaults(func=lowercase_ ) return parser def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Any: """simple docstring""" A__ = get_user_input() if args.config_file is not None: A__ = args.config_file else: if not os.path.isdir(lowercase_ ): os.makedirs(lowercase_ ) A__ = default_yaml_config_file if config_file.endswith('''.json''' ): config.to_json_file(lowercase_ ) else: config.to_yaml_file(lowercase_ ) print(f"""accelerate configuration saved at {config_file}""" ) def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]: """simple docstring""" A__ = config_command_parser() A__ = parser.parse_args() config_command(lowercase_ ) if __name__ == "__main__": main()
87
from math import sqrt import numpy as np from sympy import symbols # Coefficient # Speed of light (m/s) _lowerCamelCase : str = 299792458 # Symbols _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = symbols("""ct x y z""") def SCREAMING_SNAKE_CASE ( lowercase_ ) -> float: """simple docstring""" if velocity > c: raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''' ) elif velocity < 1: # Usually the speed should be much higher than 1 (c order of magnitude) raise ValueError('''Speed must be greater than or equal to 1!''' ) return velocity / c def SCREAMING_SNAKE_CASE ( lowercase_ ) -> float: """simple docstring""" return 1 / sqrt(1 - beta(lowercase_ ) ** 2 ) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> np.ndarray: """simple docstring""" return np.array( [ [gamma(lowercase_ ), -gamma(lowercase_ ) * beta(lowercase_ ), 0, 0], [-gamma(lowercase_ ) * beta(lowercase_ ), gamma(lowercase_ ), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], ] ) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ = None ) -> np.ndarray: """simple docstring""" if event is None: A__ = np.array([ct, x, y, z] ) # Symbolic four vector else: event[0] *= c # x0 is ct (speed of light * time) return transformation_matrix(lowercase_ ) @ event if __name__ == "__main__": import doctest doctest.testmod() # Example of symbolic vector: _lowerCamelCase : Tuple = transform(29979245) print("""Example of four vector: """) print(F'''ct\' = {four_vector[0]}''') print(F'''x\' = {four_vector[1]}''') print(F'''y\' = {four_vector[2]}''') print(F'''z\' = {four_vector[3]}''') # Substitute symbols with numerical values _lowerCamelCase : int = {ct: c, x: 1, y: 1, z: 1} _lowerCamelCase : Any = [four_vector[i].subs(sub_dict) for i in range(4)] print(F'''\n{numerical_vector}''')
87
1
import unittest import numpy as np from transformers import RobertaPreLayerNormConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, ) class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[str] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str=13 , UpperCAmelCase__ : List[str]=7 , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Any=99 , UpperCAmelCase__ : Optional[int]=32 , UpperCAmelCase__ : Any=5 , UpperCAmelCase__ : Tuple=4 , UpperCAmelCase__ : Union[str, Any]=37 , UpperCAmelCase__ : int="gelu" , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : Optional[Any]=512 , UpperCAmelCase__ : Any=16 , UpperCAmelCase__ : Dict=2 , UpperCAmelCase__ : Dict=0.02 , UpperCAmelCase__ : Dict=4 , ) ->Optional[Any]: '''simple docstring''' A__ = parent A__ = batch_size A__ = seq_length A__ = is_training A__ = use_attention_mask A__ = use_token_type_ids A__ = use_labels A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = type_vocab_size A__ = type_sequence_label_size A__ = initializer_range A__ = num_choices def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[str]: '''simple docstring''' A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) A__ = None if self.use_attention_mask: A__ = random_attention_mask([self.batch_size, self.seq_length]) A__ = None if self.use_token_type_ids: A__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) A__ = RobertaPreLayerNormConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def SCREAMING_SNAKE_CASE ( self : Tuple) ->int: '''simple docstring''' A__ = self.prepare_config_and_inputs() A__ , A__ , A__ , A__ = config_and_inputs A__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask} return config, inputs_dict def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[Any]: '''simple docstring''' A__ = self.prepare_config_and_inputs() A__ , A__ , A__ , A__ = config_and_inputs A__ = True A__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) A__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax # Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40 class UpperCamelCase_ ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = True UpperCAmelCase__ = ( ( FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, ) if is_flax_available() else () ) def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[int]: '''simple docstring''' A__ = FlaxRobertaPreLayerNormModelTester(self) @slow def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->str: '''simple docstring''' for model_class_name in self.all_model_classes: A__ = model_class_name.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=UpperCAmelCase__) A__ = model(np.ones((1, 1))) self.assertIsNotNone(UpperCAmelCase__) @require_flax class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE ( self : str) ->Optional[int]: '''simple docstring''' A__ = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=UpperCAmelCase__) A__ = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa) A__ = model(UpperCAmelCase__)[0] A__ = [1, 11, 50_265] self.assertEqual(list(output.shape) , UpperCAmelCase__) # compare the actual values for a slice. A__ = np.array( [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa) self.assertTrue(np.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1e-4)) @slow def SCREAMING_SNAKE_CASE ( self : str) ->Any: '''simple docstring''' A__ = FlaxRobertaPreLayerNormModel.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=UpperCAmelCase__) A__ = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa) A__ = model(UpperCAmelCase__)[0] # compare the actual values for a slice. A__ = np.array( [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa) self.assertTrue(np.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1e-4))
87
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> list: """simple docstring""" if len(lowercase_ ) <= 1: return [tuple(lowercase_ )] A__ = [] def generate(lowercase_ , lowercase_ ): if k == 1: res.append(tuple(arr[:] ) ) return generate(k - 1 , lowercase_ ) for i in range(k - 1 ): if k % 2 == 0: # k is even A__ , A__ = arr[k - 1], arr[i] else: # k is odd A__ , A__ = arr[k - 1], arr[0] generate(k - 1 , lowercase_ ) generate(len(lowercase_ ) , lowercase_ ) return res if __name__ == "__main__": _lowerCamelCase : int = input("""Enter numbers separated by a comma:\n""").strip() _lowerCamelCase : str = [int(item) for item in user_input.split(""",""")] print(heaps(arr))
87
1
import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCamelCase_ : '''simple docstring''' def __init__( self : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any=13 , UpperCAmelCase__ : List[Any]=32 , UpperCAmelCase__ : Optional[int]=3 , UpperCAmelCase__ : Optional[Any]=4 , UpperCAmelCase__ : Dict=[10, 20, 30, 40] , UpperCAmelCase__ : Union[str, Any]=[2, 2, 3, 2] , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Union[str, Any]=37 , UpperCAmelCase__ : Union[str, Any]="gelu" , UpperCAmelCase__ : Any=10 , UpperCAmelCase__ : List[Any]=0.02 , UpperCAmelCase__ : List[str]=["stage2", "stage3", "stage4"] , UpperCAmelCase__ : Tuple=[2, 3, 4] , UpperCAmelCase__ : str=None , ) ->Optional[Any]: '''simple docstring''' A__ = parent A__ = batch_size A__ = image_size A__ = num_channels A__ = num_stages A__ = hidden_sizes A__ = depths A__ = is_training A__ = use_labels A__ = intermediate_size A__ = hidden_act A__ = num_labels A__ = initializer_range A__ = out_features A__ = out_indices A__ = scope def SCREAMING_SNAKE_CASE ( self : str) ->Tuple: '''simple docstring''' A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.num_labels) A__ = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE ( self : Tuple) ->Any: '''simple docstring''' return ConvNextVaConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : str) ->List[str]: '''simple docstring''' A__ = ConvNextVaModel(config=UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() A__ = model(UpperCAmelCase__) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int) ->Any: '''simple docstring''' A__ = ConvNextVaForImageClassification(UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() A__ = model(UpperCAmelCase__ , labels=UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str) ->str: '''simple docstring''' A__ = ConvNextVaBackbone(config=UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() A__ = model(UpperCAmelCase__) # verify hidden states self.parent.assertEqual(len(result.feature_maps) , len(config.out_features)) self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[1], 4, 4]) # verify channels self.parent.assertEqual(len(model.channels) , len(config.out_features)) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:]) # verify backbone works with out_features=None A__ = None A__ = ConvNextVaBackbone(config=UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() A__ = model(UpperCAmelCase__) # verify feature maps self.parent.assertEqual(len(result.feature_maps) , 1) self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1]) # verify channels self.parent.assertEqual(len(model.channels) , 1) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]]) def SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[int]: '''simple docstring''' A__ = self.prepare_config_and_inputs() A__ , A__ , A__ = config_and_inputs A__ = {'''pixel_values''': pixel_values} return config, inputs_dict def SCREAMING_SNAKE_CASE ( self : List[Any]) ->int: '''simple docstring''' A__ = self.prepare_config_and_inputs() A__ , A__ , A__ = config_and_inputs A__ = {'''pixel_values''': pixel_values, '''labels''': labels} return config, inputs_dict @require_torch class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) UpperCAmelCase__ = ( {'''feature-extraction''': ConvNextVaModel, '''image-classification''': ConvNextVaForImageClassification} if is_torch_available() else {} ) UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[Any]: '''simple docstring''' A__ = ConvNextVaModelTester(self) A__ = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37) def SCREAMING_SNAKE_CASE ( self : str) ->Tuple: '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def SCREAMING_SNAKE_CASE ( self : int) ->Any: '''simple docstring''' return @unittest.skip(reason='''ConvNextV2 does not use inputs_embeds''') def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[Any]: '''simple docstring''' pass @unittest.skip(reason='''ConvNextV2 does not support input and output embeddings''') def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Any: '''simple docstring''' pass @unittest.skip(reason='''ConvNextV2 does not use feedforward chunking''') def SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[int]: '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self : Any) ->List[Any]: '''simple docstring''' if not self.model_tester.is_training: return for model_class in self.all_model_classes: A__ , A__ = self.model_tester.prepare_config_and_inputs_with_labels() A__ = True if model_class.__name__ in [ *get_values(UpperCAmelCase__), *get_values(UpperCAmelCase__), ]: continue A__ = model_class(UpperCAmelCase__) model.to(UpperCAmelCase__) model.train() A__ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__) A__ = model(**UpperCAmelCase__).loss loss.backward() def SCREAMING_SNAKE_CASE ( self : int) ->List[str]: '''simple docstring''' if not self.model_tester.is_training: return for model_class in self.all_model_classes: A__ , A__ = self.model_tester.prepare_config_and_inputs_with_labels() A__ = False A__ = True if ( model_class.__name__ in [*get_values(UpperCAmelCase__), *get_values(UpperCAmelCase__)] or not model_class.supports_gradient_checkpointing ): continue A__ = model_class(UpperCAmelCase__) model.to(UpperCAmelCase__) model.gradient_checkpointing_enable() model.train() A__ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__) A__ = model(**UpperCAmelCase__).loss loss.backward() def SCREAMING_SNAKE_CASE ( self : Any) ->int: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(UpperCAmelCase__) A__ = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ = [*signature.parameters.keys()] A__ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Any) ->Dict: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict: '''simple docstring''' def check_hidden_states_output(UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any]): A__ = model_class(UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() with torch.no_grad(): A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__)) A__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A__ = self.model_tester.num_stages self.assertEqual(len(UpperCAmelCase__) , expected_num_stages + 1) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = True check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A__ = True check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__) @slow def SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[Any]: '''simple docstring''' for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ = ConvNextVaModel.from_pretrained(UpperCAmelCase__) self.assertIsNotNone(UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]: """simple docstring""" A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->int: '''simple docstring''' return AutoImageProcessor.from_pretrained('''facebook/convnextv2-tiny-1k-224''') if is_vision_available() else None @slow def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[int]: '''simple docstring''' A__ = ConvNextVaForImageClassification.from_pretrained('''facebook/convnextv2-tiny-1k-224''').to(UpperCAmelCase__) A__ = self.default_image_processor A__ = prepare_img() A__ = preprocessor(images=UpperCAmelCase__ , return_tensors='''pt''').to(UpperCAmelCase__) # forward pass with torch.no_grad(): A__ = model(**UpperCAmelCase__) # verify the logits A__ = torch.Size((1, 1_000)) self.assertEqual(outputs.logits.shape , UpperCAmelCase__) A__ = torch.tensor([0.9996, 0.1966, -0.4386]).to(UpperCAmelCase__) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4))
87
import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def SCREAMING_SNAKE_CASE ( lowercase_ ) -> int: """simple docstring""" return 1.0 / (1.0 + np.exp(-_outputs )) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Union[str, Any]: """simple docstring""" A__ = np.max(_outputs , axis=-1 , keepdims=lowercase_ ) A__ = np.exp(_outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=lowercase_ ) class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''sigmoid''' UpperCAmelCase__ = '''softmax''' UpperCAmelCase__ = '''none''' @add_end_docstrings( UpperCAmelCase__ , R''' return_all_scores (`bool`, *optional*, defaults to `False`): Whether to return all prediction scores or just the one of the predicted class. function_to_apply (`str`, *optional*, defaults to `"default"`): The function to apply to the model outputs in order to retrieve the scores. Accepts four different values: - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model has several labels, will apply the softmax function on the output. - `"sigmoid"`: Applies the sigmoid function on the output. - `"softmax"`: Applies the softmax function on the output. - `"none"`: Does not apply any function on the output. ''' , ) class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = False UpperCAmelCase__ = ClassificationFunction.NONE def __init__( self : Any , **UpperCAmelCase__ : Optional[Any]) ->str: '''simple docstring''' super().__init__(**UpperCAmelCase__) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == '''tf''' else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING) def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : int="" , **UpperCAmelCase__ : Any) ->int: '''simple docstring''' A__ = tokenizer_kwargs A__ = {} if hasattr(self.model.config , '''return_all_scores''') and return_all_scores is None: A__ = self.model.config.return_all_scores if isinstance(UpperCAmelCase__ , UpperCAmelCase__) or top_k is None: A__ = top_k A__ = False elif return_all_scores is not None: warnings.warn( '''`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of''' ''' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.''' , UpperCAmelCase__ , ) if return_all_scores: A__ = None else: A__ = 1 if isinstance(UpperCAmelCase__ , UpperCAmelCase__): A__ = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: A__ = function_to_apply return preprocess_params, {}, postprocess_params def __call__( self : str , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Optional[int]) ->Union[str, Any]: '''simple docstring''' A__ = super().__call__(*UpperCAmelCase__ , **UpperCAmelCase__) # TODO try and retrieve it in a nicer way from _sanitize_parameters. A__ = '''top_k''' not in kwargs if isinstance(args[0] , UpperCAmelCase__) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : Any , **UpperCAmelCase__ : str) ->Dict[str, GenericTensor]: '''simple docstring''' A__ = self.framework if isinstance(UpperCAmelCase__ , UpperCAmelCase__): return self.tokenizer(**UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__) elif isinstance(UpperCAmelCase__ , UpperCAmelCase__) and len(UpperCAmelCase__) == 1 and isinstance(inputs[0] , UpperCAmelCase__) and len(inputs[0]) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__) elif isinstance(UpperCAmelCase__ , UpperCAmelCase__): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( '''The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a''' ''' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.''') return self.tokenizer(UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : Tuple) ->Tuple: '''simple docstring''' return self.model(**UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : List[Any]=1 , UpperCAmelCase__ : str=True) ->Dict: '''simple docstring''' if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: A__ = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: A__ = ClassificationFunction.SOFTMAX elif hasattr(self.model.config , '''function_to_apply''') and function_to_apply is None: A__ = self.model.config.function_to_apply else: A__ = ClassificationFunction.NONE A__ = model_outputs['''logits'''][0] A__ = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: A__ = sigmoid(UpperCAmelCase__) elif function_to_apply == ClassificationFunction.SOFTMAX: A__ = softmax(UpperCAmelCase__) elif function_to_apply == ClassificationFunction.NONE: A__ = outputs else: raise ValueError(f"""Unrecognized `function_to_apply` argument: {function_to_apply}""") if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} A__ = [ {'''label''': self.model.config.idalabel[i], '''score''': score.item()} for i, score in enumerate(UpperCAmelCase__) ] if not _legacy: dict_scores.sort(key=lambda UpperCAmelCase__: x["score"] , reverse=UpperCAmelCase__) if top_k is not None: A__ = dict_scores[:top_k] return dict_scores
87
1
import math from collections.abc import Iterator from itertools import takewhile def SCREAMING_SNAKE_CASE ( lowercase_ ) -> bool: """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(lowercase_ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def SCREAMING_SNAKE_CASE ( ) -> Iterator[int]: """simple docstring""" A__ = 2 while True: if is_prime(lowercase_ ): yield num num += 1 def SCREAMING_SNAKE_CASE ( lowercase_ = 2_000_000 ) -> int: """simple docstring""" return sum(takewhile(lambda lowercase_ : x < n , prime_generator() ) ) if __name__ == "__main__": print(F'''{solution() = }''')
87
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _lowerCamelCase : Any = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : int = ["""XLNetTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : int = ["""XLNetTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Union[str, Any] = [ """XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """XLNetForMultipleChoice""", """XLNetForQuestionAnswering""", """XLNetForQuestionAnsweringSimple""", """XLNetForSequenceClassification""", """XLNetForTokenClassification""", """XLNetLMHeadModel""", """XLNetModel""", """XLNetPreTrainedModel""", """load_tf_weights_in_xlnet""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Dict = [ """TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFXLNetForMultipleChoice""", """TFXLNetForQuestionAnsweringSimple""", """TFXLNetForSequenceClassification""", """TFXLNetForTokenClassification""", """TFXLNetLMHeadModel""", """TFXLNetMainLayer""", """TFXLNetModel""", """TFXLNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet import XLNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet_fast import XLNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlnet import ( XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, XLNetForMultipleChoice, XLNetForQuestionAnswering, XLNetForQuestionAnsweringSimple, XLNetForSequenceClassification, XLNetForTokenClassification, XLNetLMHeadModel, XLNetModel, XLNetPreTrainedModel, load_tf_weights_in_xlnet, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlnet import ( TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLNetForMultipleChoice, TFXLNetForQuestionAnsweringSimple, TFXLNetForSequenceClassification, TFXLNetForTokenClassification, TFXLNetLMHeadModel, TFXLNetMainLayer, TFXLNetModel, TFXLNetPreTrainedModel, ) else: import sys _lowerCamelCase : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
87
1
from sklearn.metrics import mean_squared_error import datasets _lowerCamelCase : Optional[Any] = """\ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } """ _lowerCamelCase : Optional[Any] = """\ Mean Squared Error(MSE) is the average of the square of difference between the predicted and actual values. """ _lowerCamelCase : Tuple = """ Args: predictions: array-like of shape (n_samples,) or (n_samples, n_outputs) Estimated target values. references: array-like of shape (n_samples,) or (n_samples, n_outputs) Ground truth (correct) target values. sample_weight: array-like of shape (n_samples,), default=None Sample weights. multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\" Defines aggregating of multiple output values. Array-like value defines weights used to average errors. \"raw_values\" : Returns a full set of errors in case of multioutput input. \"uniform_average\" : Errors of all outputs are averaged with uniform weight. squared : bool, default=True If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value. Returns: mse : mean squared error. Examples: >>> mse_metric = datasets.load_metric(\"mse\") >>> predictions = [2.5, 0.0, 2, 8] >>> references = [3, -0.5, 2, 7] >>> results = mse_metric.compute(predictions=predictions, references=references) >>> print(results) {'mse': 0.375} >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False) >>> print(rmse_result) {'mse': 0.6123724356957945} If you're using multi-dimensional lists, then set the config as follows : >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\") >>> predictions = [[0.5, 1], [-1, 1], [7, -6]] >>> references = [[0, 2], [-1, 2], [8, -5]] >>> results = mse_metric.compute(predictions=predictions, references=references) >>> print(results) {'mse': 0.7083333333333334} >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values') >>> print(results) # doctest: +NORMALIZE_WHITESPACE {'mse': array([0.41666667, 1. ])} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase_ ( datasets.Metric ): '''simple docstring''' def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[int]: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types()) , reference_urls=[ '''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html''' ] , ) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->int: '''simple docstring''' if self.config_name == "multilist": return { "predictions": datasets.Sequence(datasets.Value('''float''')), "references": datasets.Sequence(datasets.Value('''float''')), } else: return { "predictions": datasets.Value('''float'''), "references": datasets.Value('''float'''), } def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str=None , UpperCAmelCase__ : Optional[Any]="uniform_average" , UpperCAmelCase__ : Tuple=True) ->Optional[Any]: '''simple docstring''' A__ = mean_squared_error( UpperCAmelCase__ , UpperCAmelCase__ , sample_weight=UpperCAmelCase__ , multioutput=UpperCAmelCase__ , squared=UpperCAmelCase__) return {"mse": mse}
87
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCamelCase : Optional[Any] = logging.get_logger(__name__) _lowerCamelCase : Union[str, Any] = { """google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""", """google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""", # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 } class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''mobilenet_v1''' def __init__( self : Optional[int] , UpperCAmelCase__ : Optional[int]=3 , UpperCAmelCase__ : Optional[Any]=224 , UpperCAmelCase__ : Optional[int]=1.0 , UpperCAmelCase__ : Optional[int]=8 , UpperCAmelCase__ : Tuple="relu6" , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Dict=0.999 , UpperCAmelCase__ : str=0.02 , UpperCAmelCase__ : Optional[int]=0.001 , **UpperCAmelCase__ : Dict , ) ->List[str]: '''simple docstring''' super().__init__(**UpperCAmelCase__) if depth_multiplier <= 0: raise ValueError('''depth_multiplier must be greater than zero.''') A__ = num_channels A__ = image_size A__ = depth_multiplier A__ = min_depth A__ = hidden_act A__ = tf_padding A__ = classifier_dropout_prob A__ = initializer_range A__ = layer_norm_eps class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = version.parse('''1.11''' ) @property def SCREAMING_SNAKE_CASE ( self : Any) ->Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict([('''pixel_values''', {0: '''batch'''})]) @property def SCREAMING_SNAKE_CASE ( self : List[str]) ->Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task == "image-classification": return OrderedDict([('''logits''', {0: '''batch'''})]) else: return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})]) @property def SCREAMING_SNAKE_CASE ( self : int) ->float: '''simple docstring''' return 1e-4
87
1
import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() _lowerCamelCase : Optional[Any] = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Optional[Any]: """simple docstring""" print('''Loading config file...''' ) def flatten_yaml_as_dict(lowercase_ , lowercase_="" , lowercase_="." ): A__ = [] for k, v in d.items(): A__ = parent_key + sep + k if parent_key else k if isinstance(lowercase_ , collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(lowercase_ , lowercase_ , sep=lowercase_ ).items() ) else: items.append((new_key, v) ) return dict(lowercase_ ) A__ = argparse.Namespace() with open(lowercase_ , '''r''' ) as yaml_file: try: A__ = yaml.load(lowercase_ , Loader=yaml.FullLoader ) A__ = flatten_yaml_as_dict(lowercase_ ) for k, v in flat_cfg.items(): setattr(lowercase_ , lowercase_ , lowercase_ ) except yaml.YAMLError as exc: logger.error('''Error while loading config file: {}. Error message: {}'''.format(lowercase_ , str(lowercase_ ) ) ) return config def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Optional[Any]: """simple docstring""" A__ = MobileViTVaConfig() A__ = False # dataset if task_name.startswith('''imagenet1k_''' ): A__ = 1_000 if int(task_name.strip().split('''_''' )[-1] ) == 384: A__ = 384 else: A__ = 256 A__ = '''imagenet-1k-id2label.json''' elif task_name.startswith('''imagenet21k_to_1k_''' ): A__ = 21_000 if int(task_name.strip().split('''_''' )[-1] ) == 384: A__ = 384 else: A__ = 256 A__ = '''imagenet-22k-id2label.json''' elif task_name.startswith('''ade20k_''' ): A__ = 151 A__ = 512 A__ = '''ade20k-id2label.json''' A__ = True elif task_name.startswith('''voc_''' ): A__ = 21 A__ = 512 A__ = '''pascal-voc-id2label.json''' A__ = True # orig_config A__ = load_orig_config_file(lowercase_ ) assert getattr(lowercase_ , '''model.classification.name''' , -1 ) == "mobilevit_v2", "Invalid model" A__ = getattr(lowercase_ , '''model.classification.mitv2.width_multiplier''' , 1.0 ) assert ( getattr(lowercase_ , '''model.classification.mitv2.attn_norm_layer''' , -1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" A__ = getattr(lowercase_ , '''model.classification.activation.name''' , '''swish''' ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: A__ = getattr(lowercase_ , '''model.segmentation.output_stride''' , 16 ) if "_deeplabv3" in task_name: A__ = getattr(lowercase_ , '''model.segmentation.deeplabv3.aspp_rates''' , [12, 24, 36] ) A__ = getattr(lowercase_ , '''model.segmentation.deeplabv3.aspp_out_channels''' , 512 ) A__ = getattr(lowercase_ , '''model.segmentation.deeplabv3.aspp_dropout''' , 0.1 ) # id2label A__ = '''huggingface/label-files''' A__ = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type='''dataset''' ) , '''r''' ) ) A__ = {int(lowercase_ ): v for k, v in idalabel.items()} A__ = idalabel A__ = {v: k for k, v in idalabel.items()} return config def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Union[str, Any]: """simple docstring""" A__ = dct.pop(lowercase_ ) A__ = val def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=False ) -> Union[str, Any]: """simple docstring""" if base_model: A__ = '''''' else: A__ = '''mobilevitv2.''' A__ = [] for k in state_dict.keys(): if k[:8] == "encoder.": A__ = k[8:] else: A__ = k if ".block." in k: A__ = k_new.replace('''.block.''' , '''.''' ) if ".conv." in k: A__ = k_new.replace('''.conv.''' , '''.convolution.''' ) if ".norm." in k: A__ = k_new.replace('''.norm.''' , '''.normalization.''' ) if "conv_1." in k: A__ = k_new.replace('''conv_1.''' , f"""{model_prefix}conv_stem.""" ) for i in [1, 2]: if f"""layer_{i}.""" in k: A__ = k_new.replace(f"""layer_{i}.""" , f"""{model_prefix}encoder.layer.{i-1}.layer.""" ) if ".exp_1x1." in k: A__ = k_new.replace('''.exp_1x1.''' , '''.expand_1x1.''' ) if ".red_1x1." in k: A__ = k_new.replace('''.red_1x1.''' , '''.reduce_1x1.''' ) for i in [3, 4, 5]: if f"""layer_{i}.0.""" in k: A__ = k_new.replace(f"""layer_{i}.0.""" , f"""{model_prefix}encoder.layer.{i-1}.downsampling_layer.""" ) if f"""layer_{i}.1.local_rep.0.""" in k: A__ = k_new.replace(f"""layer_{i}.1.local_rep.0.""" , f"""{model_prefix}encoder.layer.{i-1}.conv_kxk.""" ) if f"""layer_{i}.1.local_rep.1.""" in k: A__ = k_new.replace(f"""layer_{i}.1.local_rep.1.""" , f"""{model_prefix}encoder.layer.{i-1}.conv_1x1.""" ) for i in [3, 4, 5]: if i == 3: A__ = [0, 1] elif i == 4: A__ = [0, 1, 2, 3] elif i == 5: A__ = [0, 1, 2] for j in j_in: if f"""layer_{i}.1.global_rep.{j}.""" in k: A__ = k_new.replace( f"""layer_{i}.1.global_rep.{j}.""" , f"""{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.""" ) if f"""layer_{i}.1.global_rep.{j+1}.""" in k: A__ = k_new.replace( f"""layer_{i}.1.global_rep.{j+1}.""" , f"""{model_prefix}encoder.layer.{i-1}.layernorm.""" ) if f"""layer_{i}.1.conv_proj.""" in k: A__ = k_new.replace(f"""layer_{i}.1.conv_proj.""" , f"""{model_prefix}encoder.layer.{i-1}.conv_projection.""" ) if "pre_norm_attn.0." in k: A__ = k_new.replace('''pre_norm_attn.0.''' , '''layernorm_before.''' ) if "pre_norm_attn.1." in k: A__ = k_new.replace('''pre_norm_attn.1.''' , '''attention.''' ) if "pre_norm_ffn.0." in k: A__ = k_new.replace('''pre_norm_ffn.0.''' , '''layernorm_after.''' ) if "pre_norm_ffn.1." in k: A__ = k_new.replace('''pre_norm_ffn.1.''' , '''ffn.conv1.''' ) if "pre_norm_ffn.3." in k: A__ = k_new.replace('''pre_norm_ffn.3.''' , '''ffn.conv2.''' ) if "classifier.1." in k: A__ = k_new.replace('''classifier.1.''' , '''classifier.''' ) if "seg_head." in k: A__ = k_new.replace('''seg_head.''' , '''segmentation_head.''' ) if ".aspp_layer." in k: A__ = k_new.replace('''.aspp_layer.''' , '''.''' ) if ".aspp_pool." in k: A__ = k_new.replace('''.aspp_pool.''' , '''.''' ) rename_keys.append((k, k_new) ) return rename_keys def SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[Any]: """simple docstring""" A__ = [] for k in state_dict.keys(): if k.startswith('''seg_head.aux_head.''' ): keys_to_ignore.append(lowercase_ ) for k in keys_to_ignore: state_dict.pop(lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]: """simple docstring""" A__ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" A__ = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw ) return im @torch.no_grad() def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Tuple: """simple docstring""" A__ = get_mobilevitva_config(lowercase_ , lowercase_ ) # load original state_dict A__ = torch.load(lowercase_ , map_location='''cpu''' ) # load huggingface model if task_name.startswith('''ade20k_''' ) or task_name.startswith('''voc_''' ): A__ = MobileViTVaForSemanticSegmentation(lowercase_ ).eval() A__ = False else: A__ = MobileViTVaForImageClassification(lowercase_ ).eval() A__ = False # remove and rename some keys of load the original model A__ = checkpoint remove_unused_keys(lowercase_ ) A__ = create_rename_keys(lowercase_ , base_model=lowercase_ ) for rename_key_src, rename_key_dest in rename_keys: rename_key(lowercase_ , lowercase_ , lowercase_ ) # load modified state_dict model.load_state_dict(lowercase_ ) # Check outputs on an image, prepared by MobileViTImageProcessor A__ = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) A__ = image_processor(images=prepare_img() , return_tensors='''pt''' ) A__ = model(**lowercase_ ) # verify classification model if task_name.startswith('''imagenet''' ): A__ = outputs.logits A__ = logits.argmax(-1 ).item() print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] ) if task_name.startswith('''imagenet1k_256''' ) and config.width_multiplier == 1.0: # expected_logits for base variant A__ = torch.tensor([-1.6_3_3_6E0_0, -7.3_2_0_4E-0_2, -5.1_8_8_3E-0_1] ) assert torch.allclose(logits[0, :3] , lowercase_ , atol=1E-4 ) Path(lowercase_ ).mkdir(exist_ok=lowercase_ ) print(f"""Saving model {task_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(lowercase_ ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(lowercase_ ) if __name__ == "__main__": _lowerCamelCase : int = argparse.ArgumentParser() # Required parameters parser.add_argument( """--task""", default="""imagenet1k_256""", type=str, help=( """Name of the task for which the MobileViTV2 model you'd like to convert is trained on . """ """ Classification (ImageNet-1k) - MobileViTV2 (256x256) : imagenet1k_256 - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384 - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) : imagenet21k_to_1k_256 - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on ImageNet-1k 384x384) : imagenet21k_to_1k_384 Segmentation - ADE20K Dataset : ade20k_deeplabv3 - Pascal VOC 2012 Dataset: voc_deeplabv3 """ ), choices=[ """imagenet1k_256""", """imagenet1k_384""", """imagenet21k_to_1k_256""", """imagenet21k_to_1k_384""", """ade20k_deeplabv3""", """voc_deeplabv3""", ], ) parser.add_argument( """--orig_checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file).""" ) parser.add_argument("""--orig_config_path""", required=True, type=str, help="""Path to the original config file.""") parser.add_argument( """--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory.""" ) _lowerCamelCase : int = parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
87
import unittest from pathlib import Path from shutil import copyfile from transformers import SPIECE_UNDERLINE, is_sentencepiece_available from transformers.models.speech_to_text import SpeechaTextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin _lowerCamelCase : Dict = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_sentencepiece_available(): import sentencepiece as sp _lowerCamelCase : str = 5 _lowerCamelCase : int = 10 @require_sentencepiece @require_tokenizers class UpperCamelCase_ ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = SpeechaTextTokenizer UpperCAmelCase__ = False UpperCAmelCase__ = True def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[str]: '''simple docstring''' super().setUp() A__ = sp.SentencePieceProcessor() spm_model.Load(UpperCAmelCase__) A__ = ['''<s>''', '''<pad>''', '''</s>''', '''<unk>'''] vocab += [spm_model.IdToPiece(id_) for id_ in range(len(UpperCAmelCase__))] A__ = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__)))) A__ = Path(self.tmpdirname) save_json(UpperCAmelCase__ , save_dir / VOCAB_FILES_NAMES['''vocab_file''']) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(UpperCAmelCase__ , save_dir / VOCAB_FILES_NAMES['''spm_file''']) A__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname) tokenizer.save_pretrained(self.tmpdirname) def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[int]: '''simple docstring''' A__ = '''<pad>''' A__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__) , UpperCAmelCase__) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__) , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any: '''simple docstring''' A__ = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , '''<s>''') self.assertEqual(vocab_keys[1] , '''<pad>''') self.assertEqual(vocab_keys[-1] , '''j''') self.assertEqual(len(UpperCAmelCase__) , 1_001) def SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[str]: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1_001) def SCREAMING_SNAKE_CASE ( self : int) ->List[str]: '''simple docstring''' A__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname) A__ = tokenizer.tokenize('''This is a test''') self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase__) , [289, 50, 14, 174, 386] , ) A__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''') self.assertListEqual( UpperCAmelCase__ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , ) A__ = tokenizer.convert_tokens_to_ids(UpperCAmelCase__) self.assertListEqual(UpperCAmelCase__ , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8]) A__ = tokenizer.convert_ids_to_tokens(UpperCAmelCase__) self.assertListEqual( UpperCAmelCase__ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , ) @slow def SCREAMING_SNAKE_CASE ( self : int) ->List[Any]: '''simple docstring''' A__ = {'''input_ids''': [[3_791, 797, 31, 11, 64, 797, 31, 2_429, 433, 12, 1_176, 12, 20, 786, 915, 142, 2_413, 240, 37, 3_238, 797, 31, 11, 35, 93, 915, 142, 2_413, 240, 37, 5_540, 567, 1_276, 93, 37, 610, 40, 62, 455, 657, 1_042, 123, 780, 177, 37, 309, 241, 1_298, 514, 20, 292, 2_737, 114, 2_469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3_388, 511, 459, 4, 3_555, 40, 321, 302, 705, 4, 3_388, 511, 583, 326, 5, 5, 5, 62, 3_310, 560, 177, 2_680, 217, 1_508, 32, 31, 853, 418, 64, 583, 511, 1_605, 62, 35, 93, 560, 177, 2_680, 217, 1_508, 1_521, 64, 583, 511, 519, 62, 20, 1_515, 764, 20, 149, 261, 5_625, 7_972, 20, 5_540, 567, 1_276, 93, 3_925, 1_675, 11, 15, 802, 7_972, 576, 217, 1_508, 11, 35, 93, 1_253, 2_441, 15, 289, 652, 31, 416, 321, 3_842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2_681, 1_153, 3_434, 20, 5_540, 37, 567, 126, 1_253, 2_441, 3_376, 449, 210, 431, 1_563, 177, 767, 5_540, 11, 1_203, 472, 11, 2_953, 685, 285, 364, 706, 1_153, 20, 6_799, 20, 2_869, 20, 4_464, 126, 40, 2_429, 20, 1_040, 866, 2_664, 418, 20, 318, 20, 1_726, 186, 20, 265, 522, 35, 93, 2_191, 4_634, 20, 1_040, 12, 6_799, 15, 228, 2_356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_575, 2_666, 684, 1_582, 1_176, 12, 627, 149, 619, 20, 4_902, 563, 11, 20, 149, 261, 3_420, 2_356, 174, 142, 4_714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase__ , model_name='''facebook/s2t-small-mustc-en-de-st''' , revision='''a14f04cf0776c02f62a8cb800cf7909e15ea23ad''' , ) @require_sentencepiece class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = '''valhalla/s2t_mustc_multilinguial_medium''' UpperCAmelCase__ = '''C\'est trop cool''' UpperCAmelCase__ = '''Esto es genial''' @classmethod def SCREAMING_SNAKE_CASE ( cls : Dict) ->Dict: '''simple docstring''' A__ = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name) return cls def SCREAMING_SNAKE_CASE ( self : str) ->Dict: '''simple docstring''' self.assertEqual(self.tokenizer.lang_code_to_id['''pt'''] , 4) self.assertEqual(self.tokenizer.lang_code_to_id['''ru'''] , 6) self.assertEqual(self.tokenizer.lang_code_to_id['''it'''] , 9) self.assertEqual(self.tokenizer.lang_code_to_id['''de'''] , 11) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Dict: '''simple docstring''' self.assertEqual(self.tokenizer.vocab_size , 10_000) def SCREAMING_SNAKE_CASE ( self : Dict) ->Union[str, Any]: '''simple docstring''' self.assertIn(UpperCAmelCase__ , self.tokenizer.all_special_ids) A__ = [ES_CODE, 4, 1_601, 47, 7_647, 2] A__ = self.tokenizer.decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__) A__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCAmelCase__) self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__) self.assertNotIn(self.tokenizer.eos_token , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : int) ->str: '''simple docstring''' A__ = '''fr''' A__ = self.tokenizer(self.french_text).input_ids self.assertEqual(encoded[0] , UpperCAmelCase__) self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->int: '''simple docstring''' A__ = '''fr''' self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE]) A__ = '''es''' self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE])
87
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCamelCase : Tuple = { """configuration_timesformer""": ["""TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TimesformerConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Optional[int] = [ """TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """TimesformerModel""", """TimesformerForVideoClassification""", """TimesformerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timesformer import ( TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimesformerForVideoClassification, TimesformerModel, TimesformerPreTrainedModel, ) else: import sys _lowerCamelCase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
87
from __future__ import annotations import requests def SCREAMING_SNAKE_CASE ( lowercase_ ) -> dict: """simple docstring""" A__ = f"""https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty""" return requests.get(lowercase_ ).json() def SCREAMING_SNAKE_CASE ( lowercase_ = 10 ) -> list[dict]: """simple docstring""" A__ = '''https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty''' A__ = requests.get(lowercase_ ).json()[:max_stories] return [get_hackernews_story(lowercase_ ) for story_id in story_ids] def SCREAMING_SNAKE_CASE ( lowercase_ = 10 ) -> str: """simple docstring""" A__ = hackernews_top_stories(lowercase_ ) return "\n".join('''* [{title}]({url})'''.format(**lowercase_ ) for story in stories ) if __name__ == "__main__": print(hackernews_top_stories_as_markdown())
87
1
import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = ['''image_processor''', '''tokenizer'''] UpperCAmelCase__ = '''LayoutLMv2ImageProcessor''' UpperCAmelCase__ = ('''LayoutXLMTokenizer''', '''LayoutXLMTokenizerFast''') def __init__( self : Any , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : Optional[int]=None , **UpperCAmelCase__ : Optional[int]) ->Optional[Any]: '''simple docstring''' if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , UpperCAmelCase__ , ) A__ = kwargs.pop('''feature_extractor''') A__ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''') if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''') super().__init__(UpperCAmelCase__ , UpperCAmelCase__) def __call__( self : Any , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCAmelCase__ : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , UpperCAmelCase__ : Union[List[List[int]], List[List[List[int]]]] = None , UpperCAmelCase__ : Optional[Union[List[int], List[List[int]]]] = None , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase__ : Union[bool, str, TruncationStrategy] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , **UpperCAmelCase__ : Union[str, Any] , ) ->BatchEncoding: '''simple docstring''' if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( '''You cannot provide bounding boxes ''' '''if you initialized the image processor with apply_ocr set to True.''') if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( '''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''') if return_overflowing_tokens is True and return_offsets_mapping is False: raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''') # first, apply the image processor A__ = self.image_processor(images=UpperCAmelCase__ , return_tensors=UpperCAmelCase__) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(UpperCAmelCase__ , UpperCAmelCase__): A__ = [text] # add batch dimension (as the image processor always adds a batch dimension) A__ = features['''words'''] A__ = self.tokenizer( text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , stride=UpperCAmelCase__ , pad_to_multiple_of=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , return_overflowing_tokens=UpperCAmelCase__ , return_special_tokens_mask=UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , return_length=UpperCAmelCase__ , verbose=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ , ) # add pixel values A__ = features.pop('''pixel_values''') if return_overflowing_tokens is True: A__ = self.get_overflowing_images(UpperCAmelCase__ , encoded_inputs['''overflow_to_sample_mapping''']) A__ = images return encoded_inputs def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Tuple) ->Union[str, Any]: '''simple docstring''' A__ = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx]) if len(UpperCAmelCase__) != len(UpperCAmelCase__): raise ValueError( '''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got''' f""" {len(UpperCAmelCase__)} and {len(UpperCAmelCase__)}""") return images_with_overflow def SCREAMING_SNAKE_CASE ( self : Tuple , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Optional[int]) ->int: '''simple docstring''' return self.tokenizer.batch_decode(*UpperCAmelCase__ , **UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : str , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : Dict) ->int: '''simple docstring''' return self.tokenizer.decode(*UpperCAmelCase__ , **UpperCAmelCase__) @property def SCREAMING_SNAKE_CASE ( self : List[Any]) ->str: '''simple docstring''' return ["input_ids", "bbox", "attention_mask", "image"] @property def SCREAMING_SNAKE_CASE ( self : int) ->Union[str, Any]: '''simple docstring''' warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , UpperCAmelCase__ , ) return self.image_processor_class @property def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any: '''simple docstring''' warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , UpperCAmelCase__ , ) return self.image_processor
87
import os import sys import warnings from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen from ..table import array_cast from ..utils.file_utils import is_local_path from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: import PIL.Image from .features import FeatureType _lowerCamelCase : Optional[List[str]] = None _lowerCamelCase : int = """<""" if sys.byteorder == """little""" else """>""" # Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image _lowerCamelCase : Union[str, Any] = [ np.dtype("""|b1"""), np.dtype("""|u1"""), np.dtype("""<u2"""), np.dtype(""">u2"""), np.dtype("""<i2"""), np.dtype(""">i2"""), np.dtype("""<u4"""), np.dtype(""">u4"""), np.dtype("""<i4"""), np.dtype(""">i4"""), np.dtype("""<f4"""), np.dtype(""">f4"""), np.dtype("""<f8"""), np.dtype(""">f8"""), ] @dataclass class UpperCamelCase_ : '''simple docstring''' UpperCAmelCase__ = True UpperCAmelCase__ = None # Automatically constructed UpperCAmelCase__ = "PIL.Image.Image" UpperCAmelCase__ = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} ) UpperCAmelCase__ = field(default='''Image''' , init=UpperCAmelCase__ , repr=UpperCAmelCase__ ) def __call__( self : List[str]) ->List[str]: '''simple docstring''' return self.pa_type def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) ->dict: '''simple docstring''' if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support encoding images, please install \'Pillow\'.''') if isinstance(UpperCAmelCase__ , UpperCAmelCase__): A__ = np.array(UpperCAmelCase__) if isinstance(UpperCAmelCase__ , UpperCAmelCase__): return {"path": value, "bytes": None} elif isinstance(UpperCAmelCase__ , UpperCAmelCase__): return {"path": None, "bytes": value} elif isinstance(UpperCAmelCase__ , np.ndarray): # convert the image array to PNG/TIFF bytes return encode_np_array(UpperCAmelCase__) elif isinstance(UpperCAmelCase__ , PIL.Image.Image): # convert the PIL image to bytes (default format is PNG/TIFF) return encode_pil_image(UpperCAmelCase__) elif value.get('''path''') is not None and os.path.isfile(value['''path''']): # we set "bytes": None to not duplicate the data if they're already available locally return {"bytes": None, "path": value.get('''path''')} elif value.get('''bytes''') is not None or value.get('''path''') is not None: # store the image bytes, and path is used to infer the image format using the file extension return {"bytes": value.get('''bytes'''), "path": value.get('''path''')} else: raise ValueError( f"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""") def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : dict , UpperCAmelCase__ : str=None) ->"PIL.Image.Image": '''simple docstring''' if not self.decode: raise RuntimeError('''Decoding is disabled for this feature. Please use Image(decode=True) instead.''') if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support decoding images, please install \'Pillow\'.''') if token_per_repo_id is None: A__ = {} A__ , A__ = value['''path'''], value['''bytes'''] if bytes_ is None: if path is None: raise ValueError(f"""An image should have one of 'path' or 'bytes' but both are None in {value}.""") else: if is_local_path(UpperCAmelCase__): A__ = PIL.Image.open(UpperCAmelCase__) else: A__ = path.split('''::''')[-1] try: A__ = string_to_dict(UpperCAmelCase__ , config.HUB_DATASETS_URL)['''repo_id'''] A__ = token_per_repo_id.get(UpperCAmelCase__) except ValueError: A__ = None with xopen(UpperCAmelCase__ , '''rb''' , use_auth_token=UpperCAmelCase__) as f: A__ = BytesIO(f.read()) A__ = PIL.Image.open(bytes_) else: A__ = PIL.Image.open(BytesIO(bytes_)) image.load() # to avoid "Too many open files" errors return image def SCREAMING_SNAKE_CASE ( self : Dict) ->Union["FeatureType", Dict[str, "FeatureType"]]: '''simple docstring''' from .features import Value return ( self if self.decode else { "bytes": Value('''binary'''), "path": Value('''string'''), } ) def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Union[pa.StringArray, pa.StructArray, pa.ListArray]) ->pa.StructArray: '''simple docstring''' if pa.types.is_string(storage.type): A__ = pa.array([None] * len(UpperCAmelCase__) , type=pa.binary()) A__ = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null()) elif pa.types.is_binary(storage.type): A__ = pa.array([None] * len(UpperCAmelCase__) , type=pa.string()) A__ = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null()) elif pa.types.is_struct(storage.type): if storage.type.get_field_index('''bytes''') >= 0: A__ = storage.field('''bytes''') else: A__ = pa.array([None] * len(UpperCAmelCase__) , type=pa.binary()) if storage.type.get_field_index('''path''') >= 0: A__ = storage.field('''path''') else: A__ = pa.array([None] * len(UpperCAmelCase__) , type=pa.string()) A__ = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null()) elif pa.types.is_list(storage.type): A__ = pa.array( [encode_np_array(np.array(UpperCAmelCase__))['''bytes'''] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , ) A__ = pa.array([None] * len(UpperCAmelCase__) , type=pa.string()) A__ = pa.StructArray.from_arrays( [bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null()) return array_cast(UpperCAmelCase__ , self.pa_type) def SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase__ : pa.StructArray) ->pa.StructArray: '''simple docstring''' @no_op_if_value_is_null def path_to_bytes(UpperCAmelCase__ : Dict): with xopen(UpperCAmelCase__ , '''rb''') as f: A__ = f.read() return bytes_ A__ = pa.array( [ (path_to_bytes(x['''path''']) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) A__ = pa.array( [os.path.basename(UpperCAmelCase__) if path is not None else None for path in storage.field('''path''').to_pylist()] , type=pa.string() , ) A__ = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null()) return array_cast(UpperCAmelCase__ , self.pa_type) def SCREAMING_SNAKE_CASE ( ) -> List[str]: """simple docstring""" if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support encoding images, please install \'Pillow\'.''' ) global _IMAGE_COMPRESSION_FORMATS if _IMAGE_COMPRESSION_FORMATS is None: PIL.Image.init() A__ = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) ) return _IMAGE_COMPRESSION_FORMATS def SCREAMING_SNAKE_CASE ( lowercase_ ) -> bytes: """simple docstring""" A__ = BytesIO() if image.format in list_image_compression_formats(): A__ = image.format else: A__ = '''PNG''' if image.mode in ['''1''', '''L''', '''LA''', '''RGB''', '''RGBA'''] else '''TIFF''' image.save(lowercase_ , format=lowercase_ ) return buffer.getvalue() def SCREAMING_SNAKE_CASE ( lowercase_ ) -> dict: """simple docstring""" if hasattr(lowercase_ , '''filename''' ) and image.filename != "": return {"path": image.filename, "bytes": None} else: return {"path": None, "bytes": image_to_bytes(lowercase_ )} def SCREAMING_SNAKE_CASE ( lowercase_ ) -> dict: """simple docstring""" if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support encoding images, please install \'Pillow\'.''' ) A__ = array.dtype A__ = dtype.byteorder if dtype.byteorder != '''=''' else _NATIVE_BYTEORDER A__ = dtype.kind A__ = dtype.itemsize A__ = None # Multi-channel array case (only np.dtype("|u1") is allowed) if array.shape[2:]: A__ = np.dtype('''|u1''' ) if dtype_kind not in ["u", "i"]: raise TypeError( f"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.""" ) if dtype is not dest_dtype: warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" ) # Exact match elif dtype in _VALID_IMAGE_ARRAY_DTPYES: A__ = dtype else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually) while dtype_itemsize >= 1: A__ = dtype_byteorder + dtype_kind + str(lowercase_ ) A__ = np.dtype(lowercase_ ) if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES: warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" ) break else: dtype_itemsize //= 2 if dest_dtype is None: raise TypeError( f"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}""" ) A__ = PIL.Image.fromarray(array.astype(lowercase_ ) ) return {"path": None, "bytes": image_to_bytes(lowercase_ )} def SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[dict]: """simple docstring""" if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support encoding images, please install \'Pillow\'.''' ) if objs: A__ , A__ = first_non_null_value(lowercase_ ) if isinstance(lowercase_ , lowercase_ ): return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs] if isinstance(lowercase_ , np.ndarray ): A__ = no_op_if_value_is_null(lowercase_ ) return [obj_to_image_dict_func(lowercase_ ) for obj in objs] elif isinstance(lowercase_ , PIL.Image.Image ): A__ = no_op_if_value_is_null(lowercase_ ) return [obj_to_image_dict_func(lowercase_ ) for obj in objs] else: return objs else: return objs
87
1
def SCREAMING_SNAKE_CASE ( lowercase_ = 100 ) -> int: """simple docstring""" A__ = (n * (n + 1) // 2) ** 2 A__ = n * (n + 1) * (2 * n + 1) // 6 return sum_cubes - sum_squares if __name__ == "__main__": print(F'''{solution() = }''')
87
from __future__ import annotations import unittest from transformers import MobileBertConfig, is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_MODEL_FOR_PRETRAINING_MAPPING, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertModel, ) @require_tf class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = ( ( TFMobileBertModel, TFMobileBertForMaskedLM, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertForMultipleChoice, ) if is_tf_available() else () ) UpperCAmelCase__ = ( { '''feature-extraction''': TFMobileBertModel, '''fill-mask''': TFMobileBertForMaskedLM, '''question-answering''': TFMobileBertForQuestionAnswering, '''text-classification''': TFMobileBertForSequenceClassification, '''token-classification''': TFMobileBertForTokenClassification, '''zero-shot''': TFMobileBertForSequenceClassification, } if is_tf_available() else {} ) UpperCAmelCase__ = False UpperCAmelCase__ = False def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str=False) ->Optional[Any]: '''simple docstring''' A__ = super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__) if return_labels: if model_class in get_values(UpperCAmelCase__): A__ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa) return inputs_dict class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : int=13 , UpperCAmelCase__ : str=7 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : str=99 , UpperCAmelCase__ : List[str]=32 , UpperCAmelCase__ : Optional[int]=32 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : List[str]=4 , UpperCAmelCase__ : Optional[Any]=37 , UpperCAmelCase__ : Optional[int]="gelu" , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : List[Any]=512 , UpperCAmelCase__ : Tuple=16 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : Dict=0.02 , UpperCAmelCase__ : int=3 , UpperCAmelCase__ : List[str]=4 , UpperCAmelCase__ : Tuple=None , ) ->Any: '''simple docstring''' A__ = parent A__ = batch_size A__ = seq_length A__ = is_training A__ = use_input_mask A__ = use_token_type_ids A__ = use_labels A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = type_vocab_size A__ = type_sequence_label_size A__ = initializer_range A__ = num_labels A__ = num_choices A__ = scope A__ = embedding_size def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Tuple: '''simple docstring''' A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) A__ = None if self.use_input_mask: A__ = random_attention_mask([self.batch_size, self.seq_length]) A__ = None if self.use_token_type_ids: A__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) A__ = None A__ = None A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size) A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) A__ = ids_tensor([self.batch_size] , self.num_choices) A__ = MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[Any]) ->Any: '''simple docstring''' A__ = TFMobileBertModel(config=UpperCAmelCase__) A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A__ = model(UpperCAmelCase__) A__ = [input_ids, input_mask] A__ = model(UpperCAmelCase__) A__ = model(UpperCAmelCase__) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size)) def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple) ->Optional[Any]: '''simple docstring''' A__ = TFMobileBertForMaskedLM(config=UpperCAmelCase__) A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A__ = model(UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any]) ->int: '''simple docstring''' A__ = TFMobileBertForNextSentencePrediction(config=UpperCAmelCase__) A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A__ = model(UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2)) def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int) ->List[Any]: '''simple docstring''' A__ = TFMobileBertForPreTraining(config=UpperCAmelCase__) A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A__ = model(UpperCAmelCase__) self.parent.assertEqual( result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2)) def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple) ->Dict: '''simple docstring''' A__ = self.num_labels A__ = TFMobileBertForSequenceClassification(config=UpperCAmelCase__) A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A__ = model(UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : int) ->Dict: '''simple docstring''' A__ = self.num_choices A__ = TFMobileBertForMultipleChoice(config=UpperCAmelCase__) A__ = tf.tile(tf.expand_dims(UpperCAmelCase__ , 1) , (1, self.num_choices, 1)) A__ = tf.tile(tf.expand_dims(UpperCAmelCase__ , 1) , (1, self.num_choices, 1)) A__ = tf.tile(tf.expand_dims(UpperCAmelCase__ , 1) , (1, self.num_choices, 1)) A__ = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, '''token_type_ids''': multiple_choice_token_type_ids, } A__ = model(UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int]) ->int: '''simple docstring''' A__ = self.num_labels A__ = TFMobileBertForTokenClassification(config=UpperCAmelCase__) A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A__ = model(UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any]) ->Union[str, Any]: '''simple docstring''' A__ = TFMobileBertForQuestionAnswering(config=UpperCAmelCase__) A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A__ = model(UpperCAmelCase__) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def SCREAMING_SNAKE_CASE ( self : Any) ->str: '''simple docstring''' A__ = self.prepare_config_and_inputs() ( ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ) = config_and_inputs A__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict def SCREAMING_SNAKE_CASE ( self : Any) ->Union[str, Any]: '''simple docstring''' A__ = TFMobileBertModelTest.TFMobileBertModelTester(self) A__ = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37) def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[int]: '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[str]) ->Tuple: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Tuple) ->Dict: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[int]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Tuple) ->List[str]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*UpperCAmelCase__) @slow def SCREAMING_SNAKE_CASE ( self : str) ->List[Any]: '''simple docstring''' for model_name in ["google/mobilebert-uncased"]: A__ = TFMobileBertModel.from_pretrained(UpperCAmelCase__) self.assertIsNotNone(UpperCAmelCase__) @require_tf class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Any: '''simple docstring''' A__ = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''') A__ = tf.constant([[0, 1, 2, 3, 4, 5]]) A__ = model(UpperCAmelCase__)[0] A__ = [1, 6, 30_522] self.assertEqual(output.shape , UpperCAmelCase__) A__ = tf.constant( [ [ [-4.5919547, -9.248295, -9.645256], [-6.7306175, -6.440284, -6.6052837], [-7.2743506, -6.7847915, -6.024673], ] ]) tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase__ , atol=1e-4)
87
1
def SCREAMING_SNAKE_CASE ( lowercase_ = 1_000 ) -> int: """simple docstring""" A__ = 3 A__ = 0 while a < n: if a % 3 == 0 or a % 5 == 0: result += a elif a % 15 == 0: result -= a a += 1 return result if __name__ == "__main__": print(F'''{solution() = }''')
87
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' def __init__( self : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int=13 , UpperCAmelCase__ : Union[str, Any]=3 , UpperCAmelCase__ : str=224 , UpperCAmelCase__ : str=30 , UpperCAmelCase__ : Tuple=400 , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Union[str, Any]=[0.5, 0.5, 0.5] , UpperCAmelCase__ : Tuple=[0.5, 0.5, 0.5] , ) ->str: '''simple docstring''' A__ = size if size is not None else {'''height''': 18, '''width''': 18} A__ = parent A__ = batch_size A__ = num_channels A__ = image_size A__ = min_resolution A__ = max_resolution A__ = do_resize A__ = size A__ = do_normalize A__ = image_mean A__ = image_std def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[int]: '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class UpperCamelCase_ ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = ViTImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE ( self : List[str]) ->str: '''simple docstring''' A__ = EfficientFormerImageProcessorTester(self) @property def SCREAMING_SNAKE_CASE ( self : Dict) ->int: '''simple docstring''' return self.image_proc_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(UpperCAmelCase__ , '''image_mean''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''image_std''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''do_normalize''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''do_resize''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''size''')) def SCREAMING_SNAKE_CASE ( self : List[str]) ->Dict: '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self : str) ->Optional[Any]: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) # create random PIL images A__ = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCAmelCase__) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , Image.Image) # Test not batched input A__ = image_processor(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , ) # Test batched A__ = image_processor(UpperCAmelCase__ , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , ) def SCREAMING_SNAKE_CASE ( self : Tuple) ->List[Any]: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors A__ = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , np.ndarray) # Test not batched input A__ = image_processor(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , ) # Test batched A__ = image_processor(UpperCAmelCase__ , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , ) def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[Any]: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors A__ = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , torch.Tensor) # Test not batched input A__ = image_processor(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , ) # Test batched A__ = image_processor(UpperCAmelCase__ , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , )
87
1
import torch def SCREAMING_SNAKE_CASE ( ) -> Optional[int]: """simple docstring""" if torch.cuda.is_available(): A__ = torch.cuda.device_count() else: A__ = 0 print(f"""Successfully ran on {num_gpus} GPUs""" ) if __name__ == "__main__": main()
87
from math import atan, cos, radians, sin, tan from .haversine_distance import haversine_distance _lowerCamelCase : Dict = 6_378_137.0 _lowerCamelCase : Union[str, Any] = 6_356_752.314_245 _lowerCamelCase : List[Any] = 6378137 def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> float: """simple docstring""" A__ = (AXIS_A - AXIS_B) / AXIS_A # Parametric latitudes # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude A__ = atan((1 - flattening) * tan(radians(lowercase_ ) ) ) A__ = atan((1 - flattening) * tan(radians(lowercase_ ) ) ) # Compute central angle between two points # using haversine theta. sigma = haversine_distance / equatorial radius A__ = haversine_distance(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) / EQUATORIAL_RADIUS # Intermediate P and Q values A__ = (b_lata + b_lata) / 2 A__ = (b_lata - b_lata) / 2 # Intermediate X value # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2) A__ = (sin(lowercase_ ) ** 2) * (cos(lowercase_ ) ** 2) A__ = cos(sigma / 2 ) ** 2 A__ = (sigma - sin(lowercase_ )) * (x_numerator / x_demonimator) # Intermediate Y value # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2) A__ = (cos(lowercase_ ) ** 2) * (sin(lowercase_ ) ** 2) A__ = sin(sigma / 2 ) ** 2 A__ = (sigma + sin(lowercase_ )) * (y_numerator / y_denominator) return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value))) if __name__ == "__main__": import doctest doctest.testmod()
87
1
from __future__ import annotations import requests def SCREAMING_SNAKE_CASE ( lowercase_ ) -> dict: """simple docstring""" A__ = f"""https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty""" return requests.get(lowercase_ ).json() def SCREAMING_SNAKE_CASE ( lowercase_ = 10 ) -> list[dict]: """simple docstring""" A__ = '''https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty''' A__ = requests.get(lowercase_ ).json()[:max_stories] return [get_hackernews_story(lowercase_ ) for story_id in story_ids] def SCREAMING_SNAKE_CASE ( lowercase_ = 10 ) -> str: """simple docstring""" A__ = hackernews_top_stories(lowercase_ ) return "\n".join('''* [{title}]({url})'''.format(**lowercase_ ) for story in stories ) if __name__ == "__main__": print(hackernews_top_stories_as_markdown())
87
import heapq import sys import numpy as np _lowerCamelCase : Any = tuple[int, int] class UpperCamelCase_ : '''simple docstring''' def __init__( self : Any) ->str: '''simple docstring''' A__ = [] A__ = set() def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[str]: '''simple docstring''' if not self.empty(): return self.elements[0][0] else: return float('''inf''') def SCREAMING_SNAKE_CASE ( self : Tuple) ->str: '''simple docstring''' return len(self.elements) == 0 def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any]) ->List[str]: '''simple docstring''' if item not in self.set: heapq.heappush(self.elements , (priority, item)) self.set.add(UpperCAmelCase__) else: # update # print("update", item) A__ = [] ((A__) , (A__)) = heapq.heappop(self.elements) while x != item: temp.append((pri, x)) ((A__) , (A__)) = heapq.heappop(self.elements) temp.append((priority, item)) for pro, xxx in temp: heapq.heappush(self.elements , (pro, xxx)) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : List[Any]) ->Union[str, Any]: '''simple docstring''' if item in self.set: self.set.remove(UpperCAmelCase__) A__ = [] ((A__) , (A__)) = heapq.heappop(self.elements) while x != item: temp.append((pro, x)) ((A__) , (A__)) = heapq.heappop(self.elements) for prito, yyy in temp: heapq.heappush(self.elements , (prito, yyy)) def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: '''simple docstring''' return self.elements[0][1] def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->int: '''simple docstring''' ((A__) , (A__)) = heapq.heappop(self.elements) self.set.remove(UpperCAmelCase__) return (priority, item) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str: """simple docstring""" A__ = np.array(lowercase_ ) A__ = np.array(lowercase_ ) return np.linalg.norm(a - b ) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str: """simple docstring""" return consistent_heuristic(lowercase_ , lowercase_ ) // t def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Union[str, Any]: """simple docstring""" return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] ) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Optional[int]: """simple docstring""" A__ = g_function[start] + Wa * heuristics[i](lowercase_ , lowercase_ ) return ans def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> str: """simple docstring""" A__ = np.chararray((n, n) ) for i in range(lowercase_ ): for j in range(lowercase_ ): A__ = '''*''' for i in range(lowercase_ ): for j in range(lowercase_ ): if (j, (n - 1) - i) in blocks: A__ = '''#''' A__ = '''-''' A__ = back_pointer[goal] while x != start: ((A__) , (A__)) = x # print(x) A__ = '''-''' A__ = back_pointer[x] A__ = '''-''' for i in range(lowercase_ ): for j in range(lowercase_ ): if (i, j) == (0, n - 1): print(grid[i][j] , end=''' ''' ) print('''<-- End position''' , end=''' ''' ) else: print(grid[i][j] , end=''' ''' ) print() print('''^''' ) print('''Start position''' ) print() print('''# is an obstacle''' ) print('''- is the path taken by algorithm''' ) print('''PATH TAKEN BY THE ALGORITHM IS:-''' ) A__ = back_pointer[goal] while x != start: print(lowercase_ , end=''' ''' ) A__ = back_pointer[x] print(lowercase_ ) sys.exit() def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Dict: """simple docstring""" if p[0] < 0 or p[0] > n - 1: return False if p[1] < 0 or p[1] > n - 1: return False return True def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Union[str, Any]: """simple docstring""" for itera in range(lowercase_ ): open_list[itera].remove_element(lowercase_ ) # print("s", s) # print("j", j) ((A__) , (A__)) = s A__ = (x - 1, y) A__ = (x + 1, y) A__ = (x, y + 1) A__ = (x, y - 1) for neighbours in [left, right, up, down]: if neighbours not in blocks: if valid(lowercase_ ) and neighbours not in visited: # print("neighbour", neighbours) visited.add(lowercase_ ) A__ = -1 A__ = float('''inf''' ) if valid(lowercase_ ) and g_function[neighbours] > g_function[s] + 1: A__ = g_function[s] + 1 A__ = s if neighbours not in close_list_anchor: open_list[0].put(lowercase_ , key(lowercase_ , 0 , lowercase_ , lowercase_ ) ) if neighbours not in close_list_inad: for var in range(1 , lowercase_ ): if key(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) <= Wa * key( lowercase_ , 0 , lowercase_ , lowercase_ ): open_list[j].put( lowercase_ , key(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) ) def SCREAMING_SNAKE_CASE ( ) -> Optional[int]: """simple docstring""" A__ = [] for x in range(1 , 5 ): for y in range(1 , 6 ): some_list.append((x, y) ) for x in range(15 , 20 ): some_list.append((x, 17) ) for x in range(10 , 19 ): for y in range(1 , 15 ): some_list.append((x, y) ) # L block for x in range(1 , 4 ): for y in range(12 , 19 ): some_list.append((x, y) ) for x in range(3 , 13 ): for y in range(16 , 19 ): some_list.append((x, y) ) return some_list _lowerCamelCase : Dict = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a} _lowerCamelCase : Optional[Any] = [ (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1), ] _lowerCamelCase : Optional[int] = make_common_ground() _lowerCamelCase : Optional[Any] = blocks_blk # hyper parameters _lowerCamelCase : Optional[int] = 1 _lowerCamelCase : Optional[int] = 1 _lowerCamelCase : List[Any] = 20 _lowerCamelCase : Any = 3 # one consistent and two other inconsistent # start and end destination _lowerCamelCase : str = (0, 0) _lowerCamelCase : Tuple = (n - 1, n - 1) _lowerCamelCase : int = 1 def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> str: """simple docstring""" A__ = {start: 0, goal: float('''inf''' )} A__ = {start: -1, goal: -1} A__ = [] A__ = set() for i in range(lowercase_ ): open_list.append(PriorityQueue() ) open_list[i].put(lowercase_ , key(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) ) A__ = [] A__ = [] while open_list[0].minkey() < float('''inf''' ): for i in range(1 , lowercase_ ): # print(open_list[0].minkey(), open_list[i].minkey()) if open_list[i].minkey() <= Wa * open_list[0].minkey(): global t t += 1 if g_function[goal] <= open_list[i].minkey(): if g_function[goal] < float('''inf''' ): do_something(lowercase_ , lowercase_ , lowercase_ ) else: A__ , A__ = open_list[i].top_show() visited.add(lowercase_ ) expand_state( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) close_list_inad.append(lowercase_ ) else: if g_function[goal] <= open_list[0].minkey(): if g_function[goal] < float('''inf''' ): do_something(lowercase_ , lowercase_ , lowercase_ ) else: A__ = open_list[0].top_show() visited.add(lowercase_ ) expand_state( lowercase_ , 0 , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) close_list_anchor.append(lowercase_ ) print('''No path found to goal''' ) print() for i in range(n - 1 , -1 , -1 ): for j in range(lowercase_ ): if (j, i) in blocks: print('''#''' , end=''' ''' ) elif (j, i) in back_pointer: if (j, i) == (n - 1, n - 1): print('''*''' , end=''' ''' ) else: print('''-''' , end=''' ''' ) else: print('''*''' , end=''' ''' ) if (j, i) == (n - 1, n - 1): print('''<-- End position''' , end=''' ''' ) print() print('''^''' ) print('''Start position''' ) print() print('''# is an obstacle''' ) print('''- is the path taken by algorithm''' ) if __name__ == "__main__": multi_a_star(start, goal, n_heuristic)
87
1
import argparse import json import os import re import torch from transformers import BloomConfig, BloomModel from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME from transformers.utils import logging logging.set_verbosity_info() _lowerCamelCase : Optional[Any] = [ """word_embeddings_layernorm.weight""", """word_embeddings_layernorm.bias""", """input_layernorm.weight""", """input_layernorm.bias""", """post_attention_layernorm.weight""", """post_attention_layernorm.bias""", """self_attention.dense.bias""", """mlp.dense_4h_to_h.bias""", """ln_f.weight""", """ln_f.bias""", ] _lowerCamelCase : int = [ """mlp.dense_4h_to_h.weight""", """self_attention.dense.weight""", ] def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Tuple: """simple docstring""" A__ = { '''word_embeddings.weight''': '''word_embeddings.weight''', '''word_embeddings.norm.weight''': '''word_embeddings_layernorm.weight''', '''word_embeddings.norm.bias''': '''word_embeddings_layernorm.bias''', '''weight''': '''ln_f.weight''', '''bias''': '''ln_f.bias''', } if key in layer_rename_map: return layer_rename_map[key] # Handle transformer blocks A__ = int(re.match(R'''.*layer_(\d*).*''' , lowercase_ )[1] ) layer_number -= 3 return f"""h.{layer_number}.""" + key def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Union[str, Any]: """simple docstring""" if dtype == torch.bool: return 1 / 8 A__ = re.search(R'''[^\d](\d+)$''' , str(lowercase_ ) ) if bit_search is None: raise ValueError(f"""`dtype` is not a valid dtype: {dtype}.""" ) A__ = int(bit_search.groups()[0] ) return bit_size // 8 def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> List[Any]: """simple docstring""" if bloom_config_file == "": A__ = BloomConfig() else: A__ = BloomConfig.from_json_file(lowercase_ ) if shard_model: A__ = os.listdir(lowercase_ ) A__ = sorted(filter(lambda lowercase_ : s.startswith('''layer''' ) and "model_00" in s , lowercase_ ) ) A__ = {'''weight_map''': {}, '''metadata''': {}} A__ = 0 A__ = None A__ = BloomConfig() for j, file in enumerate(lowercase_ ): print('''Processing file: {}'''.format(lowercase_ ) ) A__ = None for i in range(lowercase_ ): # load all TP files A__ = file.replace('''model_00''' , f"""model_0{i}""" ) A__ = torch.load(os.path.join(lowercase_ , lowercase_ ) , map_location='''cpu''' ) # Rename keys in the transformers names A__ = list(temp.keys() ) for key in keys: A__ = temp.pop(lowercase_ ) if tensors is None: A__ = temp else: for key in tensors.keys(): if any(key.endswith(lowercase_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425) tensors[key] += temp[key] else: # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel A__ = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0 # We concatenate these weights accross TP ranks A__ = torch.cat([tensors[key], temp[key]] , dim=lowercase_ ) # Divide by the number of TP the weights we want to average for key in tensors.keys(): if any(key.endswith(lowercase_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): A__ = tensors[key] / pretraining_tp torch.save( lowercase_ , os.path.join( lowercase_ , '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1 ).zfill(5 ) , str(len(lowercase_ ) ).zfill(5 ) ) , ) , ) for key in tensors.keys(): A__ = tensors[key] total_size += value.numel() * get_dtype_size(value.dtype ) if key not in index_dict["weight_map"]: A__ = '''pytorch_model_{}-of-{}.bin'''.format( str(j + 1 ).zfill(5 ) , str(len(lowercase_ ) ).zfill(5 ) ) A__ = BloomConfig() A__ = pytorch_dump_folder_path + '''/''' + CONFIG_NAME A__ = total_size with open(lowercase_ , '''w''' , encoding='''utf-8''' ) as f: f.write(config.to_json_string() ) with open(os.path.join(lowercase_ , WEIGHTS_NAME + '''.index.json''' ) , '''w''' , encoding='''utf-8''' ) as f: A__ = json.dumps(lowercase_ , indent=2 , sort_keys=lowercase_ ) + '''\n''' f.write(lowercase_ ) else: A__ = BloomModel(lowercase_ ) A__ = os.listdir(lowercase_ ) A__ = sorted(filter(lambda lowercase_ : s.startswith('''layer''' ) and "model_00" in s , lowercase_ ) ) A__ = None for i, file in enumerate(lowercase_ ): A__ = None for i in range(lowercase_ ): # load all TP files A__ = file.replace('''model_00''' , f"""model_0{i}""" ) A__ = torch.load(os.path.join(lowercase_ , lowercase_ ) , map_location='''cpu''' ) # Rename keys in the transformers names A__ = list(temp.keys() ) for key in keys: A__ = temp.pop(lowercase_ ) if tensors is None: A__ = temp else: for key in tensors.keys(): # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425) if any(key.endswith(lowercase_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): tensors[key] += temp[key] else: # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel A__ = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0 # We concatenate these weights accross TP ranks A__ = torch.cat([tensors[key], temp[key]] , dim=lowercase_ ) # Divide by the number of TP the weights we want to average for key in tensors.keys(): if any(key.endswith(lowercase_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): A__ = tensors[key] / pretraining_tp A__ = model.load_state_dict(lowercase_ , strict=lowercase_ ) assert not other_keys.unexpected_keys, f"""The keys {other_keys.unexpected_keys} are unexpected""" if missing_keys is None: A__ = set(other_keys.missing_keys ) else: A__ = missing_keys.intersection(set(other_keys.missing_keys ) ) assert not missing_keys, f"""The keys {missing_keys} are missing""" # Save pytorch-model os.makedirs(lowercase_ , exist_ok=lowercase_ ) A__ = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME A__ = pytorch_dump_folder_path + '''/''' + CONFIG_NAME print(f"""Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}""" ) if config.torch_dtype is not None: A__ = model.to(config.torch_dtype ) torch.save(model.state_dict() , lowercase_ ) print(f"""Save configuration file to {pytorch_config_dump_path}""" ) with open(lowercase_ , '''w''' , encoding='''utf-8''' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": _lowerCamelCase : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--bloom_checkpoint_path""", default=None, type=str, required=True, help="""Path to the Megatron-LM checkpoint path.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--bloom_config_file""", default="""""", type=str, help=( """An optional config json file corresponding to the pre-trained model. \n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--shard_model""", action="""store_true""", help="""An optional setting to shard the output model \nThis enables sharding the converted checkpoint""", ) parser.add_argument( """--pretraining_tp""", default=4, type=int, help="""Pretraining TP rank that has been used when training the model in Megatron-LM \n""", ) _lowerCamelCase : Any = parser.parse_args() convert_bloom_checkpoint_to_pytorch( args.bloom_checkpoint_path, args.bloom_config_file, args.pytorch_dump_folder_path, args.shard_model, args.pretraining_tp, )
87
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.utils import ComputeEnvironment from .cluster import get_cluster_input from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401 from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401 from .sagemaker import get_sagemaker_input _lowerCamelCase : Optional[Any] = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine""" def SCREAMING_SNAKE_CASE ( ) -> Dict: """simple docstring""" A__ = _ask_options( '''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , ) if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: A__ = get_sagemaker_input() else: A__ = get_cluster_input() return config def SCREAMING_SNAKE_CASE ( lowercase_=None ) -> List[Any]: """simple docstring""" if subparsers is not None: A__ = subparsers.add_parser('''config''' , description=lowercase_ ) else: A__ = argparse.ArgumentParser('''Accelerate config command''' , description=lowercase_ ) parser.add_argument( '''--config_file''' , default=lowercase_ , help=( '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache ''' '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ''' '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ''' '''with \'huggingface\'.''' ) , ) if subparsers is not None: parser.set_defaults(func=lowercase_ ) return parser def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Any: """simple docstring""" A__ = get_user_input() if args.config_file is not None: A__ = args.config_file else: if not os.path.isdir(lowercase_ ): os.makedirs(lowercase_ ) A__ = default_yaml_config_file if config_file.endswith('''.json''' ): config.to_json_file(lowercase_ ) else: config.to_yaml_file(lowercase_ ) print(f"""accelerate configuration saved at {config_file}""" ) def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]: """simple docstring""" A__ = config_command_parser() A__ = parser.parse_args() config_command(lowercase_ ) if __name__ == "__main__": main()
87
1
from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCamelCase : str = logging.get_logger(__name__) _lowerCamelCase : Dict = { """EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json""", # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox } class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''gpt_neox''' def __init__( self : Any , UpperCAmelCase__ : Optional[int]=50_432 , UpperCAmelCase__ : str=6_144 , UpperCAmelCase__ : List[Any]=44 , UpperCAmelCase__ : Optional[int]=64 , UpperCAmelCase__ : Any=24_576 , UpperCAmelCase__ : List[str]="gelu" , UpperCAmelCase__ : int=0.25 , UpperCAmelCase__ : int=10_000 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Any=0.0 , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : int=2_048 , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : List[str]=1e-5 , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Dict=0 , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : Tuple=False , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Optional[int]=None , **UpperCAmelCase__ : int , ) ->str: '''simple docstring''' super().__init__(bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__) A__ = vocab_size A__ = max_position_embeddings A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = rotary_pct A__ = rotary_emb_base A__ = attention_dropout A__ = hidden_dropout A__ = classifier_dropout A__ = initializer_range A__ = layer_norm_eps A__ = use_cache A__ = tie_word_embeddings A__ = use_parallel_residual A__ = rope_scaling self._rope_scaling_validation() if self.hidden_size % self.num_attention_heads != 0: raise ValueError( '''The hidden size is not divisble by the number of attention heads! Make sure to update them!''') def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[Any]: '''simple docstring''' if self.rope_scaling is None: return if not isinstance(self.rope_scaling , UpperCAmelCase__) or len(self.rope_scaling) != 2: raise ValueError( '''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, ''' f"""got {self.rope_scaling}""") A__ = self.rope_scaling.get('''type''' , UpperCAmelCase__) A__ = self.rope_scaling.get('''factor''' , UpperCAmelCase__) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""") if rope_scaling_factor is None or not isinstance(UpperCAmelCase__ , UpperCAmelCase__) or rope_scaling_factor <= 1.0: raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""")
87
import argparse import numpy as np import torch from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging logging.set_verbosity_info() _lowerCamelCase : int = logging.get_logger("""transformers.models.speecht5""") def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Tuple: """simple docstring""" hf_model.apply_weight_norm() A__ = checkpoint['''input_conv.weight_g'''] A__ = checkpoint['''input_conv.weight_v'''] A__ = checkpoint['''input_conv.bias'''] for i in range(len(config.upsample_rates ) ): A__ = checkpoint[f"""upsamples.{i}.1.weight_g"""] A__ = checkpoint[f"""upsamples.{i}.1.weight_v"""] A__ = checkpoint[f"""upsamples.{i}.1.bias"""] for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ): for j in range(len(config.resblock_dilation_sizes ) ): A__ = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_g"""] A__ = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_v"""] A__ = checkpoint[f"""blocks.{i}.convs1.{j}.1.bias"""] A__ = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_g"""] A__ = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_v"""] A__ = checkpoint[f"""blocks.{i}.convs2.{j}.1.bias"""] A__ = checkpoint['''output_conv.1.weight_g'''] A__ = checkpoint['''output_conv.1.weight_v'''] A__ = checkpoint['''output_conv.1.bias'''] hf_model.remove_weight_norm() @torch.no_grad() def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_=None , lowercase_=None , ) -> str: """simple docstring""" if config_path is not None: A__ = SpeechTaHifiGanConfig.from_pretrained(lowercase_ ) else: A__ = SpeechTaHifiGanConfig() A__ = SpeechTaHifiGan(lowercase_ ) A__ = torch.load(lowercase_ ) load_weights(orig_checkpoint['''model''']['''generator'''] , lowercase_ , lowercase_ ) A__ = np.load(lowercase_ ) A__ = stats[0].reshape(-1 ) A__ = stats[1].reshape(-1 ) A__ = torch.from_numpy(lowercase_ ).float() A__ = torch.from_numpy(lowercase_ ).float() model.save_pretrained(lowercase_ ) if repo_id: print('''Pushing to the hub...''' ) model.push_to_hub(lowercase_ ) if __name__ == "__main__": _lowerCamelCase : Any = argparse.ArgumentParser() parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""") parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub.""" ) _lowerCamelCase : List[str] = parser.parse_args() convert_hifigan_checkpoint( args.checkpoint_path, args.stats_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
87
1
import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def SCREAMING_SNAKE_CASE ( lowercase_ ) -> str: """simple docstring""" A__ = filter(lambda lowercase_ : p.requires_grad , model.parameters() ) A__ = sum([np.prod(p.size() ) for p in model_parameters] ) return params _lowerCamelCase : Any = logging.getLogger(__name__) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Any: """simple docstring""" if metric == "rouge2": A__ = '''{val_avg_rouge2:.4f}-{step_count}''' elif metric == "bleu": A__ = '''{val_avg_bleu:.4f}-{step_count}''' elif metric == "em": A__ = '''{val_avg_em:.4f}-{step_count}''' else: raise NotImplementedError( f"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this""" ''' function.''' ) A__ = ModelCheckpoint( dirpath=lowercase_ , filename=lowercase_ , monitor=f"""val_{metric}""" , mode='''max''' , save_top_k=3 , every_n_epochs=1 , ) return checkpoint_callback def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> List[Any]: """simple docstring""" return EarlyStopping( monitor=f"""val_{metric}""" , mode='''min''' if '''loss''' in metric else '''max''' , patience=lowercase_ , verbose=lowercase_ , ) class UpperCamelCase_ ( pl.Callback ): '''simple docstring''' def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict) ->Optional[int]: '''simple docstring''' A__ = {f"""lr_group_{i}""": param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)} pl_module.logger.log_metrics(UpperCAmelCase__) @rank_zero_only def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : pl.Trainer , UpperCAmelCase__ : pl.LightningModule , UpperCAmelCase__ : str , UpperCAmelCase__ : int=True) ->None: '''simple docstring''' logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""") A__ = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']}) # Log results A__ = Path(pl_module.hparams.output_dir) if type_path == "test": A__ = od / '''test_results.txt''' A__ = od / '''test_generations.txt''' else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. A__ = od / f"""{type_path}_results/{trainer.global_step:05d}.txt""" A__ = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt""" results_file.parent.mkdir(exist_ok=UpperCAmelCase__) generations_file.parent.mkdir(exist_ok=UpperCAmelCase__) with open(UpperCAmelCase__ , '''a+''') as writer: for key in sorted(UpperCAmelCase__): if key in ["log", "progress_bar", "preds"]: continue A__ = metrics[key] if isinstance(UpperCAmelCase__ , torch.Tensor): A__ = val.item() A__ = f"""{key}: {val:.6f}\n""" writer.write(UpperCAmelCase__) if not save_generations: return if "preds" in metrics: A__ = '''\n'''.join(metrics['''preds''']) generations_file.open('''w+''').write(UpperCAmelCase__) @rank_zero_only def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : Any) ->Optional[int]: '''simple docstring''' try: A__ = pl_module.model.model.num_parameters() except AttributeError: A__ = pl_module.model.num_parameters() A__ = count_trainable_parameters(UpperCAmelCase__) # mp stands for million parameters trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1e6, '''grad_mp''': n_trainable_pars / 1e6}) @rank_zero_only def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : pl.Trainer , UpperCAmelCase__ : pl.LightningModule) ->List[str]: '''simple docstring''' save_json(pl_module.metrics , pl_module.metrics_save_path) return self._write_logs(UpperCAmelCase__ , UpperCAmelCase__ , '''test''') @rank_zero_only def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : pl.Trainer , UpperCAmelCase__ : List[str]) ->Tuple: '''simple docstring''' save_json(pl_module.metrics , pl_module.metrics_save_path) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
87
import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class UpperCamelCase_ : '''simple docstring''' def __init__( self : Tuple , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict=13 , UpperCAmelCase__ : Dict=7 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : str=99 , UpperCAmelCase__ : Union[str, Any]=32 , UpperCAmelCase__ : Tuple=5 , UpperCAmelCase__ : Union[str, Any]=4 , UpperCAmelCase__ : List[Any]=37 , UpperCAmelCase__ : Union[str, Any]="gelu" , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : Tuple=50 , UpperCAmelCase__ : Optional[int]=0.02 , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : List[str]=None , ) ->Union[str, Any]: '''simple docstring''' A__ = parent A__ = batch_size A__ = seq_length A__ = is_training A__ = use_input_mask A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = initializer_range A__ = use_labels A__ = scope def SCREAMING_SNAKE_CASE ( self : int) ->Any: '''simple docstring''' A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) A__ = None if self.use_input_mask: A__ = random_attention_mask([self.batch_size, self.seq_length]) if self.use_labels: A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) A__ = self.get_config() return config, input_ids, input_mask, token_labels def SCREAMING_SNAKE_CASE ( self : int) ->int: '''simple docstring''' return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE ( self : List[str]) ->Union[str, Any]: '''simple docstring''' ( ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ) = self.prepare_config_and_inputs() A__ = True A__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) A__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : List[Any] , ) ->Dict: '''simple docstring''' A__ = BertGenerationEncoder(config=UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() A__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__) A__ = model(UpperCAmelCase__) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , **UpperCAmelCase__ : Optional[Any] , ) ->Dict: '''simple docstring''' A__ = True A__ = BertGenerationEncoder(config=UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() A__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , ) A__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict , **UpperCAmelCase__ : Optional[int] , ) ->Any: '''simple docstring''' A__ = True A__ = True A__ = BertGenerationDecoder(config=UpperCAmelCase__).to(UpperCAmelCase__).eval() # first forward pass A__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ , ) A__ = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A__ = ids_tensor((self.batch_size, 3) , config.vocab_size) A__ = ids_tensor((self.batch_size, 3) , vocab_size=2) # append to next input_ids and A__ = torch.cat([input_ids, next_tokens] , dim=-1) A__ = torch.cat([input_mask, next_mask] , dim=-1) A__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )['''hidden_states'''][0] A__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )['''hidden_states'''][0] # select random slice A__ = ids_tensor((1,) , output_from_past.shape[-1]).item() A__ = output_from_no_past[:, -3:, random_slice_idx].detach() A__ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3)) def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] , *UpperCAmelCase__ : List[str] , ) ->List[Any]: '''simple docstring''' A__ = BertGenerationDecoder(UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() A__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[str]: '''simple docstring''' A__ , A__ , A__ , A__ = self.prepare_config_and_inputs() A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () UpperCAmelCase__ = (BertGenerationDecoder,) if is_torch_available() else () UpperCAmelCase__ = ( {'''feature-extraction''': BertGenerationEncoder, '''text-generation''': BertGenerationDecoder} if is_torch_available() else {} ) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Dict: '''simple docstring''' A__ = BertGenerationEncoderTester(self) A__ = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[str]: '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : List[Any]) ->int: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[Any]: '''simple docstring''' A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs() A__ = '''bert''' self.model_tester.create_and_check_model(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Dict) ->Union[str, Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any: '''simple docstring''' ( ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() A__ = None self.model_tester.create_and_check_model_as_decoder( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , ) def SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*UpperCAmelCase__) @slow def SCREAMING_SNAKE_CASE ( self : Dict) ->List[Any]: '''simple docstring''' A__ = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''') self.assertIsNotNone(UpperCAmelCase__) @require_torch class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE ( self : Any) ->Union[str, Any]: '''simple docstring''' A__ = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''') A__ = torch.tensor([[101, 7_592, 1_010, 2_026, 3_899, 2_003, 10_140, 102]]) with torch.no_grad(): A__ = model(UpperCAmelCase__)[0] A__ = torch.Size([1, 8, 1_024]) self.assertEqual(output.shape , UpperCAmelCase__) A__ = torch.tensor( [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1e-4)) @require_torch class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]: '''simple docstring''' A__ = BertGenerationDecoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''') A__ = torch.tensor([[101, 7_592, 1_010, 2_026, 3_899, 2_003, 10_140, 102]]) with torch.no_grad(): A__ = model(UpperCAmelCase__)[0] A__ = torch.Size([1, 8, 50_358]) self.assertEqual(output.shape , UpperCAmelCase__) A__ = torch.tensor( [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1e-4))
87
1
from __future__ import annotations class UpperCamelCase_ : '''simple docstring''' def __init__( self : List[Any] , UpperCAmelCase__ : int = 0) ->List[str]: '''simple docstring''' A__ = key def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : int) ->list[str]: '''simple docstring''' assert isinstance(UpperCAmelCase__ , UpperCAmelCase__) and isinstance(UpperCAmelCase__ , UpperCAmelCase__) A__ = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(UpperCAmelCase__) ^ key) for ch in content] def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : str , UpperCAmelCase__ : int) ->list[str]: '''simple docstring''' assert isinstance(UpperCAmelCase__ , UpperCAmelCase__) and isinstance(UpperCAmelCase__ , UpperCAmelCase__) A__ = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(UpperCAmelCase__) ^ key) for ch in content] def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : str , UpperCAmelCase__ : int = 0) ->str: '''simple docstring''' assert isinstance(UpperCAmelCase__ , UpperCAmelCase__) and isinstance(UpperCAmelCase__ , UpperCAmelCase__) A__ = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned A__ = '''''' for ch in content: ans += chr(ord(UpperCAmelCase__) ^ key) return ans def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : str , UpperCAmelCase__ : int = 0) ->str: '''simple docstring''' assert isinstance(UpperCAmelCase__ , UpperCAmelCase__) and isinstance(UpperCAmelCase__ , UpperCAmelCase__) A__ = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned A__ = '''''' for ch in content: ans += chr(ord(UpperCAmelCase__) ^ key) return ans def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : int = 0) ->bool: '''simple docstring''' assert isinstance(UpperCAmelCase__ , UpperCAmelCase__) and isinstance(UpperCAmelCase__ , UpperCAmelCase__) try: with open(UpperCAmelCase__) as fin, open('''encrypt.out''' , '''w+''') as fout: # actual encrypt-process for line in fin: fout.write(self.encrypt_string(UpperCAmelCase__ , UpperCAmelCase__)) except OSError: return False return True def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : int) ->bool: '''simple docstring''' assert isinstance(UpperCAmelCase__ , UpperCAmelCase__) and isinstance(UpperCAmelCase__ , UpperCAmelCase__) try: with open(UpperCAmelCase__) as fin, open('''decrypt.out''' , '''w+''') as fout: # actual encrypt-process for line in fin: fout.write(self.decrypt_string(UpperCAmelCase__ , UpperCAmelCase__)) except OSError: return False return True # Tests # crypt = XORCipher() # key = 67 # # test encrypt # print(crypt.encrypt("hallo welt",key)) # # test decrypt # print(crypt.decrypt(crypt.encrypt("hallo welt",key), key)) # # test encrypt_string # print(crypt.encrypt_string("hallo welt",key)) # # test decrypt_string # print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key)) # if (crypt.encrypt_file("test.txt",key)): # print("encrypt successful") # else: # print("encrypt unsuccessful") # if (crypt.decrypt_file("encrypt.out",key)): # print("decrypt successful") # else: # print("decrypt unsuccessful")
87
import argparse import json import os import time import zipfile from get_ci_error_statistics import download_artifact, get_artifacts_links from transformers import logging _lowerCamelCase : int = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Dict: """simple docstring""" A__ = set() A__ = [] def parse_line(lowercase_ ): for line in fp: if isinstance(lowercase_ , lowercase_ ): A__ = line.decode('''UTF-8''' ) if "warnings summary (final)" in line: continue # This means we are outside the body of a warning elif not line.startswith(''' ''' ): # process a single warning and move it to `selected_warnings`. if len(lowercase_ ) > 0: A__ = '''\n'''.join(lowercase_ ) # Only keep the warnings specified in `targets` if any(f""": {x}: """ in warning for x in targets ): selected_warnings.add(lowercase_ ) buffer.clear() continue else: A__ = line.strip() buffer.append(lowercase_ ) if from_gh: for filename in os.listdir(lowercase_ ): A__ = os.path.join(lowercase_ , lowercase_ ) if not os.path.isdir(lowercase_ ): # read the file if filename != "warnings.txt": continue with open(lowercase_ ) as fp: parse_line(lowercase_ ) else: try: with zipfile.ZipFile(lowercase_ ) as z: for filename in z.namelist(): if not os.path.isdir(lowercase_ ): # read the file if filename != "warnings.txt": continue with z.open(lowercase_ ) as fp: parse_line(lowercase_ ) except Exception: logger.warning( f"""{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.""" ) return selected_warnings def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str: """simple docstring""" A__ = set() A__ = [os.path.join(lowercase_ , lowercase_ ) for p in os.listdir(lowercase_ ) if (p.endswith('''.zip''' ) or from_gh)] for p in paths: selected_warnings.update(extract_warnings_from_single_artifact(lowercase_ , lowercase_ ) ) return selected_warnings if __name__ == "__main__": def SCREAMING_SNAKE_CASE ( lowercase_ ) -> int: """simple docstring""" return values.split(''',''' ) _lowerCamelCase : int = argparse.ArgumentParser() # Required parameters parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""") parser.add_argument( """--output_dir""", type=str, required=True, help="""Where to store the downloaded artifacts and other result files.""", ) parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""") # optional parameters parser.add_argument( """--targets""", default="""DeprecationWarning,UserWarning,FutureWarning""", type=list_str, help="""Comma-separated list of target warning(s) which we want to extract.""", ) parser.add_argument( """--from_gh""", action="""store_true""", help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""", ) _lowerCamelCase : List[Any] = parser.parse_args() _lowerCamelCase : List[str] = args.from_gh if from_gh: # The artifacts have to be downloaded using `actions/download-artifact@v3` pass else: os.makedirs(args.output_dir, exist_ok=True) # get download links _lowerCamelCase : Any = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) # download artifacts for idx, (name, url) in enumerate(artifacts.items()): print(name) print(url) print("""=""" * 80) download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) # extract warnings from artifacts _lowerCamelCase : Any = extract_warnings(args.output_dir, args.targets) _lowerCamelCase : Optional[Any] = sorted(selected_warnings) with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
87
1
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) _lowerCamelCase : List[str] = { """google/pix2struct-textcaps-base""": ( """https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json""" ), } class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''pix2struct_text_model''' UpperCAmelCase__ = ['''past_key_values'''] UpperCAmelCase__ = { '''hidden_size''': '''hidden_size''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self : Dict , UpperCAmelCase__ : Any=50_244 , UpperCAmelCase__ : Tuple=768 , UpperCAmelCase__ : Union[str, Any]=64 , UpperCAmelCase__ : Union[str, Any]=2_048 , UpperCAmelCase__ : List[str]=12 , UpperCAmelCase__ : Any=12 , UpperCAmelCase__ : int=32 , UpperCAmelCase__ : Optional[int]=128 , UpperCAmelCase__ : str=0.1 , UpperCAmelCase__ : List[str]=1e-6 , UpperCAmelCase__ : Dict=1.0 , UpperCAmelCase__ : Any="gelu_new" , UpperCAmelCase__ : Dict=0 , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : int=0 , UpperCAmelCase__ : List[str]=1 , UpperCAmelCase__ : str=False , UpperCAmelCase__ : Any=True , **UpperCAmelCase__ : Union[str, Any] , ) ->Union[str, Any]: '''simple docstring''' A__ = vocab_size A__ = hidden_size A__ = d_kv A__ = d_ff A__ = num_layers A__ = num_heads A__ = relative_attention_num_buckets A__ = relative_attention_max_distance A__ = dropout_rate A__ = layer_norm_epsilon A__ = initializer_factor A__ = use_cache A__ = eos_token_id A__ = decoder_start_token_id # for backwards compatibility A__ = dense_act_fn super().__init__( pad_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , decoder_start_token_id=UpperCAmelCase__ , tie_word_embeddings=UpperCAmelCase__ , is_decoder=UpperCAmelCase__ , **UpperCAmelCase__ , ) @classmethod def SCREAMING_SNAKE_CASE ( cls : Optional[Any] , UpperCAmelCase__ : Union[str, os.PathLike] , **UpperCAmelCase__ : Any) ->"PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(UpperCAmelCase__) A__ , A__ = cls.get_config_dict(UpperCAmelCase__ , **UpperCAmelCase__) # get the text config dict if we are loading from Pix2StructConfig if config_dict.get('''model_type''') == "pix2struct": A__ = config_dict['''text_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""") return cls.from_dict(UpperCAmelCase__ , **UpperCAmelCase__) class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''pix2struct_vision_model''' def __init__( self : Optional[Any] , UpperCAmelCase__ : Any=768 , UpperCAmelCase__ : int=768 , UpperCAmelCase__ : int=2_048 , UpperCAmelCase__ : int=64 , UpperCAmelCase__ : Optional[Any]=12 , UpperCAmelCase__ : int=12 , UpperCAmelCase__ : List[Any]="gelu_new" , UpperCAmelCase__ : Union[str, Any]=1e-6 , UpperCAmelCase__ : Dict=0.0 , UpperCAmelCase__ : List[Any]=0.0 , UpperCAmelCase__ : Any=1e-10 , UpperCAmelCase__ : Any=1.0 , UpperCAmelCase__ : Optional[int]=4_096 , UpperCAmelCase__ : Dict=32 , UpperCAmelCase__ : int=128 , **UpperCAmelCase__ : Dict , ) ->Optional[Any]: '''simple docstring''' super().__init__(**UpperCAmelCase__) A__ = hidden_size A__ = patch_embed_hidden_size A__ = d_ff A__ = dropout_rate A__ = num_hidden_layers A__ = num_attention_heads A__ = initializer_range A__ = initializer_factor A__ = attention_dropout A__ = layer_norm_eps A__ = dense_act_fn A__ = seq_len A__ = relative_attention_num_buckets A__ = relative_attention_max_distance A__ = d_kv @classmethod def SCREAMING_SNAKE_CASE ( cls : Tuple , UpperCAmelCase__ : Union[str, os.PathLike] , **UpperCAmelCase__ : Tuple) ->"PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(UpperCAmelCase__) A__ , A__ = cls.get_config_dict(UpperCAmelCase__ , **UpperCAmelCase__) # get the vision config dict if we are loading from Pix2StructConfig if config_dict.get('''model_type''') == "pix2struct": A__ = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""") return cls.from_dict(UpperCAmelCase__ , **UpperCAmelCase__) class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''pix2struct''' UpperCAmelCase__ = True def __init__( self : List[str] , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : str=1.0 , UpperCAmelCase__ : Optional[int]=0.02 , UpperCAmelCase__ : List[Any]=False , UpperCAmelCase__ : List[Any]=False , UpperCAmelCase__ : Dict=True , **UpperCAmelCase__ : List[Any] , ) ->Union[str, Any]: '''simple docstring''' super().__init__(tie_word_embeddings=UpperCAmelCase__ , is_encoder_decoder=UpperCAmelCase__ , **UpperCAmelCase__) if text_config is None: A__ = {} logger.info('''text_config is None. Initializing the Pix2StructTextConfig with default values.''') if vision_config is None: A__ = {} logger.info('''vision_config is None. Initializing the Pix2StructVisionConfig with default values.''') A__ = PixaStructTextConfig(**UpperCAmelCase__) A__ = PixaStructVisionConfig(**UpperCAmelCase__) A__ = self.text_config.decoder_start_token_id A__ = self.text_config.pad_token_id A__ = self.text_config.eos_token_id A__ = initializer_factor A__ = initializer_range A__ = self.initializer_range A__ = self.initializer_range A__ = is_vqa @classmethod def SCREAMING_SNAKE_CASE ( cls : Optional[Any] , UpperCAmelCase__ : PixaStructTextConfig , UpperCAmelCase__ : PixaStructVisionConfig , **UpperCAmelCase__ : Dict) ->Tuple: '''simple docstring''' return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[str]: '''simple docstring''' A__ = copy.deepcopy(self.__dict__) A__ = self.text_config.to_dict() A__ = self.vision_config.to_dict() A__ = self.__class__.model_type return output
87
class UpperCamelCase_ : # Public class to implement a graph '''simple docstring''' def __init__( self : str , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : list[list[bool]]) ->None: '''simple docstring''' A__ = row A__ = col A__ = graph def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : list[list[bool]]) ->bool: '''simple docstring''' return ( 0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and self.graph[i][j] ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : list[list[bool]]) ->None: '''simple docstring''' A__ = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order A__ = [-1, 0, 1, -1, 1, -1, 0, 1] A__ = True # Make those cells visited for k in range(8): if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , UpperCAmelCase__): self.diffs(i + row_nbr[k] , j + col_nbr[k] , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->int: # And finally, count all islands. '''simple docstring''' A__ = [[False for j in range(self.COL)] for i in range(self.ROW)] A__ = 0 for i in range(self.ROW): for j in range(self.COL): if visited[i][j] is False and self.graph[i][j] == 1: self.diffs(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__) count += 1 return count
87
1
import numpy as np import torch import tqdm from ...models.unet_ad import UNetaDModel from ...pipelines import DiffusionPipeline from ...utils import randn_tensor from ...utils.dummy_pt_objects import DDPMScheduler class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : Any , UpperCAmelCase__ : UNetaDModel , UpperCAmelCase__ : UNetaDModel , UpperCAmelCase__ : DDPMScheduler , UpperCAmelCase__ : Optional[int] , ) ->int: '''simple docstring''' super().__init__() A__ = value_function A__ = unet A__ = scheduler A__ = env A__ = env.get_dataset() A__ = {} for key in self.data.keys(): try: A__ = self.data[key].mean() except: # noqa: E722 pass A__ = {} for key in self.data.keys(): try: A__ = self.data[key].std() except: # noqa: E722 pass A__ = env.observation_space.shape[0] A__ = env.action_space.shape[0] def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : int) ->List[str]: '''simple docstring''' return (x_in - self.means[key]) / self.stds[key] def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : str , UpperCAmelCase__ : List[Any]) ->Any: '''simple docstring''' return x_in * self.stds[key] + self.means[key] def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Union[str, Any]) ->Dict: '''simple docstring''' if type(UpperCAmelCase__) is dict: return {k: self.to_torch(UpperCAmelCase__) for k, v in x_in.items()} elif torch.is_tensor(UpperCAmelCase__): return x_in.to(self.unet.device) return torch.tensor(UpperCAmelCase__ , device=self.unet.device) def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any]) ->Any: '''simple docstring''' for key, val in cond.items(): A__ = val.clone() return x_in def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str) ->List[Any]: '''simple docstring''' A__ = x.shape[0] A__ = None for i in tqdm.tqdm(self.scheduler.timesteps): # create batch of timesteps to pass into model A__ = torch.full((batch_size,) , UpperCAmelCase__ , device=self.unet.device , dtype=torch.long) for _ in range(UpperCAmelCase__): with torch.enable_grad(): x.requires_grad_() # permute to match dimension for pre-trained models A__ = self.value_function(x.permute(0 , 2 , 1) , UpperCAmelCase__).sample A__ = torch.autograd.grad([y.sum()] , [x])[0] A__ = self.scheduler._get_variance(UpperCAmelCase__) A__ = torch.exp(0.5 * posterior_variance) A__ = model_std * grad A__ = 0 A__ = x.detach() A__ = x + scale * grad A__ = self.reset_xa(UpperCAmelCase__ , UpperCAmelCase__ , self.action_dim) A__ = self.unet(x.permute(0 , 2 , 1) , UpperCAmelCase__).sample.permute(0 , 2 , 1) # TODO: verify deprecation of this kwarg A__ = self.scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , predict_epsilon=UpperCAmelCase__)['''prev_sample'''] # apply conditions to the trajectory (set the initial state) A__ = self.reset_xa(UpperCAmelCase__ , UpperCAmelCase__ , self.action_dim) A__ = self.to_torch(UpperCAmelCase__) return x, y def __call__( self : Tuple , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any]=64 , UpperCAmelCase__ : Dict=32 , UpperCAmelCase__ : int=2 , UpperCAmelCase__ : Dict=0.1) ->List[str]: '''simple docstring''' A__ = self.normalize(UpperCAmelCase__ , '''observations''') A__ = obs[None].repeat(UpperCAmelCase__ , axis=0) A__ = {0: self.to_torch(UpperCAmelCase__)} A__ = (batch_size, planning_horizon, self.state_dim + self.action_dim) # generate initial noise and apply our conditions (to make the trajectories start at current state) A__ = randn_tensor(UpperCAmelCase__ , device=self.unet.device) A__ = self.reset_xa(UpperCAmelCase__ , UpperCAmelCase__ , self.action_dim) A__ = self.to_torch(UpperCAmelCase__) # run the diffusion process A__ , A__ = self.run_diffusion(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__) # sort output trajectories by value A__ = y.argsort(0 , descending=UpperCAmelCase__).squeeze() A__ = x[sorted_idx] A__ = sorted_values[:, :, : self.action_dim] A__ = actions.detach().cpu().numpy() A__ = self.de_normalize(UpperCAmelCase__ , key='''actions''') # select the action with the highest value if y is not None: A__ = 0 else: # if we didn't run value guiding, select a random action A__ = np.random.randint(0 , UpperCAmelCase__) A__ = denorm_actions[selected_index, 0] return denorm_actions
87
from __future__ import annotations import requests _lowerCamelCase : str = set( """approved_at_utc approved_by author_flair_background_color author_flair_css_class author_flair_richtext author_flair_template_id author_fullname author_premium can_mod_post category clicked content_categories created_utc downs edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta is_original_content is_reddit_media_domain is_video link_flair_css_class link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title name permalink pwls quarantine saved score secure_media secure_media_embed selftext subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type total_awards_received ups upvote_ratio url user_reports""".split() ) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ = 1 , lowercase_ = "new" , lowercase_ = None ) -> dict: """simple docstring""" A__ = wanted_data or [] if invalid_search_terms := ", ".join(sorted(set(lowercase_ ) - valid_terms ) ): A__ = f"""Invalid search term: {invalid_search_terms}""" raise ValueError(lowercase_ ) A__ = requests.get( f"""https://reddit.com/r/{subreddit}/{age}.json?limit={limit}""" , headers={'''User-agent''': '''A random string'''} , ) if response.status_code == 429: raise requests.HTTPError A__ = response.json() if not wanted_data: return {id_: data["data"]["children"][id_] for id_ in range(lowercase_ )} A__ = {} for id_ in range(lowercase_ ): A__ = { item: data['''data''']['''children'''][id_]['''data'''][item] for item in wanted_data } return data_dict if __name__ == "__main__": # If you get Error 429, that means you are rate limited.Try after some time print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
87
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _lowerCamelCase : str = { """configuration_m2m_100""": ["""M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP""", """M2M100Config""", """M2M100OnnxConfig"""], """tokenization_m2m_100""": ["""M2M100Tokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Tuple = [ """M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST""", """M2M100ForConditionalGeneration""", """M2M100Model""", """M2M100PreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig from .tokenization_mam_aaa import MaMaaaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mam_aaa import ( M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST, MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaPreTrainedModel, ) else: import sys _lowerCamelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
87
import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = JukeboxTokenizer UpperCAmelCase__ = { '''artist''': '''Zac Brown Band''', '''genres''': '''Country''', '''lyrics''': '''I met a traveller from an antique land, Who said "Two vast and trunkless legs of stone Stand in the desert. . . . Near them, on the sand, Half sunk a shattered visage lies, whose frown, And wrinkled lip, and sneer of cold command, Tell that its sculptor well those passions read Which yet survive, stamped on these lifeless things, The hand that mocked them, and the heart that fed; And on the pedestal, these words appear: My name is Ozymandias, King of Kings; Look on my Works, ye Mighty, and despair! Nothing beside remains. Round the decay Of that colossal Wreck, boundless and bare The lone and level sands stretch far away ''', } @require_torch def SCREAMING_SNAKE_CASE ( self : Dict) ->int: '''simple docstring''' import torch A__ = JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''') A__ = tokenizer(**self.metas)['''input_ids'''] # fmt: off A__ = [ torch.tensor([[ 0, 0, 0, 7_169, 507, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67, 78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]]), torch.tensor([[0, 0, 0, 1_069, 11]]), torch.tensor([[0, 0, 0, 1_069, 11]]), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0])) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1])) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2])) @require_torch def SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[int]: '''simple docstring''' import torch A__ = JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''') A__ = tokenizer(**self.metas)['''input_ids'''] # fmt: off A__ = [ torch.tensor([[ 0, 0, 0, 1_069, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]]), torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]]), torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]]), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0])) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1])) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2]))
87
1
import argparse import torch from transformers import ( WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForAudioFrameClassification, WavaVecaForSequenceClassification, WavaVecaForXVector, logging, ) logging.set_verbosity_info() _lowerCamelCase : List[str] = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Union[str, Any]: """simple docstring""" A__ = WavaVecaForSequenceClassification.from_pretrained(lowercase_ , config=lowercase_ ) A__ = downstream_dict['''projector.weight'''] A__ = downstream_dict['''projector.bias'''] A__ = downstream_dict['''model.post_net.linear.weight'''] A__ = downstream_dict['''model.post_net.linear.bias'''] return model def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> List[str]: """simple docstring""" A__ = WavaVecaForAudioFrameClassification.from_pretrained(lowercase_ , config=lowercase_ ) A__ = downstream_dict['''model.linear.weight'''] A__ = downstream_dict['''model.linear.bias'''] return model def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Tuple: """simple docstring""" A__ = WavaVecaForXVector.from_pretrained(lowercase_ , config=lowercase_ ) A__ = downstream_dict['''connector.weight'''] A__ = downstream_dict['''connector.bias'''] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): A__ = downstream_dict[ f"""model.framelevel_feature_extractor.module.{i}.kernel.weight""" ] A__ = downstream_dict[f"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""] A__ = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight'''] A__ = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias'''] A__ = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight'''] A__ = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias'''] A__ = downstream_dict['''objective.W'''] return model @torch.no_grad() def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Union[str, Any]: """simple docstring""" A__ = torch.load(lowercase_ , map_location='''cpu''' ) A__ = checkpoint['''Downstream'''] A__ = WavaVecaConfig.from_pretrained(lowercase_ ) A__ = WavaVecaFeatureExtractor.from_pretrained( lowercase_ , return_attention_mask=lowercase_ , do_normalize=lowercase_ ) A__ = hf_config.architectures[0] if arch.endswith('''ForSequenceClassification''' ): A__ = convert_classification(lowercase_ , lowercase_ , lowercase_ ) elif arch.endswith('''ForAudioFrameClassification''' ): A__ = convert_diarization(lowercase_ , lowercase_ , lowercase_ ) elif arch.endswith('''ForXVector''' ): A__ = convert_xvector(lowercase_ , lowercase_ , lowercase_ ) else: raise NotImplementedError(f"""S3PRL weights conversion is not supported for {arch}""" ) if hf_config.use_weighted_layer_sum: A__ = checkpoint['''Featurizer''']['''weights'''] hf_feature_extractor.save_pretrained(lowercase_ ) hf_model.save_pretrained(lowercase_ ) if __name__ == "__main__": _lowerCamelCase : Dict = argparse.ArgumentParser() parser.add_argument( """--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model.""" ) parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""") parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""") _lowerCamelCase : List[str] = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
87
from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCamelCase : str = logging.get_logger(__name__) _lowerCamelCase : List[str] = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""} class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''openai-gpt''' UpperCAmelCase__ = { '''max_position_embeddings''': '''n_positions''', '''hidden_size''': '''n_embd''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : Union[str, Any] , UpperCAmelCase__ : Dict=40_478 , UpperCAmelCase__ : str=512 , UpperCAmelCase__ : Union[str, Any]=768 , UpperCAmelCase__ : Optional[Any]=12 , UpperCAmelCase__ : Any=12 , UpperCAmelCase__ : Optional[Any]="gelu" , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : List[str]=1e-5 , UpperCAmelCase__ : int=0.02 , UpperCAmelCase__ : Any="cls_index" , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Optional[Any]=0.1 , **UpperCAmelCase__ : Dict , ) ->Any: '''simple docstring''' A__ = vocab_size A__ = n_positions A__ = n_embd A__ = n_layer A__ = n_head A__ = afn A__ = resid_pdrop A__ = embd_pdrop A__ = attn_pdrop A__ = layer_norm_epsilon A__ = initializer_range A__ = summary_type A__ = summary_use_proj A__ = summary_activation A__ = summary_first_dropout A__ = summary_proj_to_labels super().__init__(**UpperCAmelCase__)
87
1
import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[Any]=7 , UpperCAmelCase__ : int=3 , UpperCAmelCase__ : int=18 , UpperCAmelCase__ : Tuple=30 , UpperCAmelCase__ : int=400 , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Tuple=True , ) ->List[str]: '''simple docstring''' A__ = size if size is not None else {'''height''': 18, '''width''': 18} A__ = parent A__ = batch_size A__ = num_channels A__ = image_size A__ = min_resolution A__ = max_resolution A__ = do_resize A__ = size A__ = do_normalize def SCREAMING_SNAKE_CASE ( self : str) ->Union[str, Any]: '''simple docstring''' return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.8866443634033203, 0.6618829369544983, 0.3891746401786804], [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296], ]), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } @require_torch @require_vision class UpperCamelCase_ ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = ImageGPTImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Union[str, Any]: '''simple docstring''' A__ = ImageGPTImageProcessingTester(self) @property def SCREAMING_SNAKE_CASE ( self : Any) ->Union[str, Any]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE ( self : int) ->Any: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(UpperCAmelCase__ , '''clusters''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''do_resize''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''size''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''do_normalize''')) def SCREAMING_SNAKE_CASE ( self : Tuple) ->List[str]: '''simple docstring''' A__ = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18}) A__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42}) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Tuple: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) A__ = json.loads(image_processor.to_json_string()) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(UpperCAmelCase__ , obj[key])) else: self.assertEqual(obj[key] , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[str]: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) with tempfile.TemporaryDirectory() as tmpdirname: A__ = os.path.join(UpperCAmelCase__ , '''image_processor.json''') image_processor_first.to_json_file(UpperCAmelCase__) A__ = self.image_processing_class.from_json_file(UpperCAmelCase__).to_dict() A__ = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(UpperCAmelCase__ , image_processor_second[key])) else: self.assertEqual(image_processor_first[key] , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->int: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(UpperCAmelCase__) A__ = self.image_processing_class.from_pretrained(UpperCAmelCase__).to_dict() A__ = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(UpperCAmelCase__ , image_processor_second[key])) else: self.assertEqual(image_processor_first[key] , UpperCAmelCase__) @unittest.skip('''ImageGPT requires clusters at initialization''') def SCREAMING_SNAKE_CASE ( self : List[Any]) ->int: '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( ) -> int: """simple docstring""" A__ = load_dataset('''hf-internal-testing/fixtures_image_utils''' , split='''test''' ) A__ = Image.open(dataset[4]['''file'''] ) A__ = Image.open(dataset[5]['''file'''] ) A__ = [imagea, imagea] return images @require_vision @require_torch class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE ( self : List[str]) ->int: '''simple docstring''' A__ = ImageGPTImageProcessor.from_pretrained('''openai/imagegpt-small''') A__ = prepare_images() # test non-batched A__ = image_processing(images[0] , return_tensors='''pt''') self.assertIsInstance(encoding.input_ids , torch.LongTensor) self.assertEqual(encoding.input_ids.shape , (1, 1_024)) A__ = [306, 191, 191] self.assertEqual(encoding.input_ids[0, :3].tolist() , UpperCAmelCase__) # test batched A__ = image_processing(UpperCAmelCase__ , return_tensors='''pt''') self.assertIsInstance(encoding.input_ids , torch.LongTensor) self.assertEqual(encoding.input_ids.shape , (2, 1_024)) A__ = [303, 13, 13] self.assertEqual(encoding.input_ids[1, -3:].tolist() , UpperCAmelCase__)
87
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> int: """simple docstring""" return int((input_a, input_a).count(1 ) != 0 ) def SCREAMING_SNAKE_CASE ( ) -> None: """simple docstring""" assert or_gate(0 , 0 ) == 0 assert or_gate(0 , 1 ) == 1 assert or_gate(1 , 0 ) == 1 assert or_gate(1 , 1 ) == 1 if __name__ == "__main__": print(or_gate(0, 1)) print(or_gate(1, 0)) print(or_gate(0, 0)) print(or_gate(1, 1))
87
1
import os import time import pytest from datasets.utils.filelock import FileLock, Timeout def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Dict: """simple docstring""" A__ = FileLock(str(tmpdir / '''foo.lock''' ) ) A__ = FileLock(str(tmpdir / '''foo.lock''' ) ) A__ = 0.01 with locka.acquire(): with pytest.raises(lowercase_ ): A__ = time.time() locka.acquire(lowercase_ ) assert time.time() - _start > timeout def SCREAMING_SNAKE_CASE ( lowercase_ ) -> str: """simple docstring""" A__ = '''a''' * 1_000 + '''.lock''' A__ = FileLock(str(tmpdir / filename ) ) assert locka._lock_file.endswith('''.lock''' ) assert not locka._lock_file.endswith(lowercase_ ) assert len(os.path.basename(locka._lock_file ) ) <= 255 A__ = FileLock(tmpdir / filename ) with locka.acquire(): with pytest.raises(lowercase_ ): locka.acquire(0 )
87
import importlib.metadata import warnings from copy import deepcopy from packaging import version from ..utils import logging from .import_utils import is_accelerate_available, is_bitsandbytes_available if is_bitsandbytes_available(): import bitsandbytes as bnb import torch import torch.nn as nn from ..pytorch_utils import ConvaD if is_accelerate_available(): from accelerate import init_empty_weights from accelerate.utils import find_tied_parameters _lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_=None , lowercase_=None ) -> Dict: """simple docstring""" if "." in tensor_name: A__ = tensor_name.split('''.''' ) for split in splits[:-1]: A__ = getattr(lowercase_ , lowercase_ ) if new_module is None: raise ValueError(f"""{module} has no attribute {split}.""" ) A__ = new_module A__ = splits[-1] if tensor_name not in module._parameters and tensor_name not in module._buffers: raise ValueError(f"""{module} does not have a parameter or a buffer named {tensor_name}.""" ) A__ = tensor_name in module._buffers A__ = getattr(lowercase_ , lowercase_ ) if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None: raise ValueError(f"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" ) A__ = False A__ = False if is_buffer or not is_bitsandbytes_available(): A__ = False A__ = False else: A__ = hasattr(bnb.nn , '''Params4bit''' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit ) A__ = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams ) if is_abit or is_abit: A__ = module._parameters[tensor_name] if param.device.type != "cuda": if value is None: A__ = old_value.to(lowercase_ ) elif isinstance(lowercase_ , torch.Tensor ): A__ = value.to('''cpu''' ) if value.dtype == torch.inta: A__ = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse( '''0.37.2''' ) if not is_abit_serializable: raise ValueError( '''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. ''' '''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' ) else: A__ = torch.tensor(lowercase_ , device='''cpu''' ) # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. if issubclass(module.source_cls , lowercase_ ) and fpaa_statistics is None: A__ = new_value.T A__ = old_value.__dict__ if is_abit: A__ = bnb.nn.IntaParams(lowercase_ , requires_grad=lowercase_ , **lowercase_ ).to(lowercase_ ) elif is_abit: A__ = bnb.nn.Paramsabit(lowercase_ , requires_grad=lowercase_ , **lowercase_ ).to(lowercase_ ) A__ = new_value if fpaa_statistics is not None: setattr(module.weight , '''SCB''' , fpaa_statistics.to(lowercase_ ) ) else: if value is None: A__ = old_value.to(lowercase_ ) elif isinstance(lowercase_ , torch.Tensor ): A__ = value.to(lowercase_ ) else: A__ = torch.tensor(lowercase_ , device=lowercase_ ) if is_buffer: A__ = new_value else: A__ = nn.Parameter(lowercase_ , requires_grad=old_value.requires_grad ) A__ = new_value def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=False ) -> Dict: """simple docstring""" for name, module in model.named_children(): if current_key_name is None: A__ = [] current_key_name.append(lowercase_ ) if (isinstance(lowercase_ , nn.Linear ) or isinstance(lowercase_ , lowercase_ )) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` if not any(key in '''.'''.join(lowercase_ ) for key in modules_to_not_convert ): with init_empty_weights(): if isinstance(lowercase_ , lowercase_ ): A__ , A__ = module.weight.shape else: A__ = module.in_features A__ = module.out_features if quantization_config.quantization_method() == "llm_int8": A__ = bnb.nn.LinearabitLt( lowercase_ , lowercase_ , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , ) A__ = True else: if ( quantization_config.llm_inta_skip_modules is not None and name in quantization_config.llm_inta_skip_modules ): pass else: A__ = bnb.nn.Linearabit( lowercase_ , lowercase_ , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , ) A__ = True # Store the module class in case we need to transpose the weight later A__ = type(lowercase_ ) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(lowercase_ ) if len(list(module.children() ) ) > 0: A__ , A__ = _replace_with_bnb_linear( lowercase_ , lowercase_ , lowercase_ , lowercase_ , has_been_replaced=lowercase_ , ) # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None ) -> Tuple: """simple docstring""" A__ = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert A__ , A__ = _replace_with_bnb_linear( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) if not has_been_replaced: logger.warning( '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.''' ''' Please double check your model architecture, or submit an issue on github if you think this is''' ''' a bug.''' ) return model def SCREAMING_SNAKE_CASE ( *lowercase_ , **lowercase_ ) -> Dict: """simple docstring""" warnings.warn( '''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' , lowercase_ , ) return replace_with_bnb_linear(*lowercase_ , **lowercase_ ) def SCREAMING_SNAKE_CASE ( *lowercase_ , **lowercase_ ) -> Optional[Any]: """simple docstring""" warnings.warn( '''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' , lowercase_ , ) return set_module_quantized_tensor_to_device(*lowercase_ , **lowercase_ ) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]: """simple docstring""" A__ = deepcopy(lowercase_ ) # this has 0 cost since it is done inside `init_empty_weights` context manager` tied_model.tie_weights() A__ = find_tied_parameters(lowercase_ ) # For compatibility with Accelerate < 0.18 if isinstance(lowercase_ , lowercase_ ): A__ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: A__ = sum(lowercase_ , [] ) A__ = len(lowercase_ ) > 0 # Check if it is a base model A__ = not hasattr(lowercase_ , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head A__ = list(model.named_children() ) A__ = [list_modules[-1][0]] # add last module together with tied weights A__ = set(lowercase_ ) - set(lowercase_ ) A__ = list(set(lowercase_ ) ) + list(lowercase_ ) # remove ".weight" from the keys A__ = ['''.weight''', '''.bias'''] A__ = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: A__ = name.replace(lowercase_ , '''''' ) filtered_module_names.append(lowercase_ ) return filtered_module_names
87
1
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCamelCase : Optional[Any] = logging.get_logger(__name__) _lowerCamelCase : Union[str, Any] = { """google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""", """google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""", # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 } class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''mobilenet_v1''' def __init__( self : Optional[int] , UpperCAmelCase__ : Optional[int]=3 , UpperCAmelCase__ : Optional[Any]=224 , UpperCAmelCase__ : Optional[int]=1.0 , UpperCAmelCase__ : Optional[int]=8 , UpperCAmelCase__ : Tuple="relu6" , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Dict=0.999 , UpperCAmelCase__ : str=0.02 , UpperCAmelCase__ : Optional[int]=0.001 , **UpperCAmelCase__ : Dict , ) ->List[str]: '''simple docstring''' super().__init__(**UpperCAmelCase__) if depth_multiplier <= 0: raise ValueError('''depth_multiplier must be greater than zero.''') A__ = num_channels A__ = image_size A__ = depth_multiplier A__ = min_depth A__ = hidden_act A__ = tf_padding A__ = classifier_dropout_prob A__ = initializer_range A__ = layer_norm_eps class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = version.parse('''1.11''' ) @property def SCREAMING_SNAKE_CASE ( self : Any) ->Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict([('''pixel_values''', {0: '''batch'''})]) @property def SCREAMING_SNAKE_CASE ( self : List[str]) ->Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task == "image-classification": return OrderedDict([('''logits''', {0: '''batch'''})]) else: return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})]) @property def SCREAMING_SNAKE_CASE ( self : int) ->float: '''simple docstring''' return 1e-4
87
from math import sqrt import numpy as np from sympy import symbols # Coefficient # Speed of light (m/s) _lowerCamelCase : str = 299792458 # Symbols _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = symbols("""ct x y z""") def SCREAMING_SNAKE_CASE ( lowercase_ ) -> float: """simple docstring""" if velocity > c: raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''' ) elif velocity < 1: # Usually the speed should be much higher than 1 (c order of magnitude) raise ValueError('''Speed must be greater than or equal to 1!''' ) return velocity / c def SCREAMING_SNAKE_CASE ( lowercase_ ) -> float: """simple docstring""" return 1 / sqrt(1 - beta(lowercase_ ) ** 2 ) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> np.ndarray: """simple docstring""" return np.array( [ [gamma(lowercase_ ), -gamma(lowercase_ ) * beta(lowercase_ ), 0, 0], [-gamma(lowercase_ ) * beta(lowercase_ ), gamma(lowercase_ ), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], ] ) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ = None ) -> np.ndarray: """simple docstring""" if event is None: A__ = np.array([ct, x, y, z] ) # Symbolic four vector else: event[0] *= c # x0 is ct (speed of light * time) return transformation_matrix(lowercase_ ) @ event if __name__ == "__main__": import doctest doctest.testmod() # Example of symbolic vector: _lowerCamelCase : Tuple = transform(29979245) print("""Example of four vector: """) print(F'''ct\' = {four_vector[0]}''') print(F'''x\' = {four_vector[1]}''') print(F'''y\' = {four_vector[2]}''') print(F'''z\' = {four_vector[3]}''') # Substitute symbols with numerical values _lowerCamelCase : int = {ct: c, x: 1, y: 1, z: 1} _lowerCamelCase : Any = [four_vector[i].subs(sub_dict) for i in range(4)] print(F'''\n{numerical_vector}''')
87
1
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> int: """simple docstring""" A__ = [1] A__ , A__ , A__ = 0, 0, 0 A__ = ugly_nums[ia] * 2 A__ = ugly_nums[ia] * 3 A__ = ugly_nums[ia] * 5 for _ in range(1 , lowercase_ ): A__ = min(lowercase_ , lowercase_ , lowercase_ ) ugly_nums.append(lowercase_ ) if next_num == next_a: ia += 1 A__ = ugly_nums[ia] * 2 if next_num == next_a: ia += 1 A__ = ugly_nums[ia] * 3 if next_num == next_a: ia += 1 A__ = ugly_nums[ia] * 5 return ugly_nums[-1] if __name__ == "__main__": from doctest import testmod testmod(verbose=True) print(F'''{ugly_numbers(200) = }''')
87
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> list: """simple docstring""" if len(lowercase_ ) <= 1: return [tuple(lowercase_ )] A__ = [] def generate(lowercase_ , lowercase_ ): if k == 1: res.append(tuple(arr[:] ) ) return generate(k - 1 , lowercase_ ) for i in range(k - 1 ): if k % 2 == 0: # k is even A__ , A__ = arr[k - 1], arr[i] else: # k is odd A__ , A__ = arr[k - 1], arr[0] generate(k - 1 , lowercase_ ) generate(len(lowercase_ ) , lowercase_ ) return res if __name__ == "__main__": _lowerCamelCase : int = input("""Enter numbers separated by a comma:\n""").strip() _lowerCamelCase : str = [int(item) for item in user_input.split(""",""")] print(heaps(arr))
87
1
# Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING, Dict, Optional import numpy as np import pyarrow as pa from .. import config from ..utils.logging import get_logger from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import jax import jaxlib _lowerCamelCase : Optional[int] = get_logger() _lowerCamelCase : Optional[dict] = None class UpperCamelCase_ ( TensorFormatter[Mapping, '''jax.Array''', Mapping] ): '''simple docstring''' def __init__( self : Dict , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Any=None , **UpperCAmelCase__ : Dict) ->List[Any]: '''simple docstring''' super().__init__(features=UpperCAmelCase__) import jax from jaxlib.xla_client import Device if isinstance(UpperCAmelCase__ , UpperCAmelCase__): raise ValueError( f"""Expected {device} to be a `str` not {type(UpperCAmelCase__)}, as `jaxlib.xla_extension.Device` """ '''is not serializable neither with `pickle` nor with `dill`. Instead you can surround ''' '''the device with `str()` to get its string identifier that will be internally mapped ''' '''to the actual `jaxlib.xla_extension.Device`.''') A__ = device if isinstance(UpperCAmelCase__ , UpperCAmelCase__) else str(jax.devices()[0]) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: A__ = self._map_devices_to_str() if self.device not in list(DEVICE_MAPPING.keys()): logger.warning( f"""Device with string identifier {self.device} not listed among the available """ f"""devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default """ f"""device: {str(jax.devices()[0])}.""") A__ = str(jax.devices()[0]) A__ = jnp_array_kwargs @staticmethod def SCREAMING_SNAKE_CASE ( ) ->Dict[str, "jaxlib.xla_extension.Device"]: '''simple docstring''' import jax return {str(UpperCAmelCase__): device for device in jax.devices()} def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : Optional[int]) ->Union[str, Any]: '''simple docstring''' import jax import jax.numpy as jnp if isinstance(UpperCAmelCase__ , UpperCAmelCase__) and column: if all( isinstance(UpperCAmelCase__ , jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column): return jnp.stack(UpperCAmelCase__ , axis=0) return column def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : List[str]) ->Union[str, Any]: '''simple docstring''' import jax import jax.numpy as jnp if isinstance(UpperCAmelCase__ , (str, bytes, type(UpperCAmelCase__))): return value elif isinstance(UpperCAmelCase__ , (np.character, np.ndarray)) and np.issubdtype(value.dtype , np.character): return value.tolist() A__ = {} if isinstance(UpperCAmelCase__ , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.integer): # the default int precision depends on the jax config # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision if jax.config.jax_enable_xaa: A__ = {'''dtype''': jnp.intaa} else: A__ = {'''dtype''': jnp.intaa} elif isinstance(UpperCAmelCase__ , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.floating): A__ = {'''dtype''': jnp.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(UpperCAmelCase__ , PIL.Image.Image): A__ = np.asarray(UpperCAmelCase__) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: A__ = self._map_devices_to_str() with jax.default_device(DEVICE_MAPPING[self.device]): # calling jnp.array on a np.ndarray does copy the data # see https://github.com/google/jax/issues/4486 return jnp.array(UpperCAmelCase__ , **{**default_dtype, **self.jnp_array_kwargs}) def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : Union[str, Any]) ->List[str]: '''simple docstring''' import jax # support for torch, tf, jax etc. if config.TORCH_AVAILABLE and "torch" in sys.modules: import torch if isinstance(UpperCAmelCase__ , torch.Tensor): return self._tensorize(data_struct.detach().cpu().numpy()[()]) if hasattr(UpperCAmelCase__ , '''__array__''') and not isinstance(UpperCAmelCase__ , jax.Array): A__ = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(UpperCAmelCase__ , np.ndarray): if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects return self._consolidate([self.recursive_tensorize(UpperCAmelCase__) for substruct in data_struct]) elif isinstance(UpperCAmelCase__ , (list, tuple)): return self._consolidate([self.recursive_tensorize(UpperCAmelCase__) for substruct in data_struct]) return self._tensorize(UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : dict) ->str: '''simple docstring''' return map_nested(self._recursive_tensorize , UpperCAmelCase__ , map_list=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : pa.Table) ->Mapping: '''simple docstring''' A__ = self.numpy_arrow_extractor().extract_row(UpperCAmelCase__) A__ = self.python_features_decoder.decode_row(UpperCAmelCase__) return self.recursive_tensorize(UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : pa.Table) ->"jax.Array": '''simple docstring''' A__ = self.numpy_arrow_extractor().extract_column(UpperCAmelCase__) A__ = self.python_features_decoder.decode_column(UpperCAmelCase__ , pa_table.column_names[0]) A__ = self.recursive_tensorize(UpperCAmelCase__) A__ = self._consolidate(UpperCAmelCase__) return column def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : pa.Table) ->Mapping: '''simple docstring''' A__ = self.numpy_arrow_extractor().extract_batch(UpperCAmelCase__) A__ = self.python_features_decoder.decode_batch(UpperCAmelCase__) A__ = self.recursive_tensorize(UpperCAmelCase__) for column_name in batch: A__ = self._consolidate(batch[column_name]) return batch
87
import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def SCREAMING_SNAKE_CASE ( lowercase_ ) -> int: """simple docstring""" return 1.0 / (1.0 + np.exp(-_outputs )) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Union[str, Any]: """simple docstring""" A__ = np.max(_outputs , axis=-1 , keepdims=lowercase_ ) A__ = np.exp(_outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=lowercase_ ) class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''sigmoid''' UpperCAmelCase__ = '''softmax''' UpperCAmelCase__ = '''none''' @add_end_docstrings( UpperCAmelCase__ , R''' return_all_scores (`bool`, *optional*, defaults to `False`): Whether to return all prediction scores or just the one of the predicted class. function_to_apply (`str`, *optional*, defaults to `"default"`): The function to apply to the model outputs in order to retrieve the scores. Accepts four different values: - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model has several labels, will apply the softmax function on the output. - `"sigmoid"`: Applies the sigmoid function on the output. - `"softmax"`: Applies the softmax function on the output. - `"none"`: Does not apply any function on the output. ''' , ) class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = False UpperCAmelCase__ = ClassificationFunction.NONE def __init__( self : Any , **UpperCAmelCase__ : Optional[Any]) ->str: '''simple docstring''' super().__init__(**UpperCAmelCase__) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == '''tf''' else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING) def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : int="" , **UpperCAmelCase__ : Any) ->int: '''simple docstring''' A__ = tokenizer_kwargs A__ = {} if hasattr(self.model.config , '''return_all_scores''') and return_all_scores is None: A__ = self.model.config.return_all_scores if isinstance(UpperCAmelCase__ , UpperCAmelCase__) or top_k is None: A__ = top_k A__ = False elif return_all_scores is not None: warnings.warn( '''`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of''' ''' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.''' , UpperCAmelCase__ , ) if return_all_scores: A__ = None else: A__ = 1 if isinstance(UpperCAmelCase__ , UpperCAmelCase__): A__ = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: A__ = function_to_apply return preprocess_params, {}, postprocess_params def __call__( self : str , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Optional[int]) ->Union[str, Any]: '''simple docstring''' A__ = super().__call__(*UpperCAmelCase__ , **UpperCAmelCase__) # TODO try and retrieve it in a nicer way from _sanitize_parameters. A__ = '''top_k''' not in kwargs if isinstance(args[0] , UpperCAmelCase__) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : Any , **UpperCAmelCase__ : str) ->Dict[str, GenericTensor]: '''simple docstring''' A__ = self.framework if isinstance(UpperCAmelCase__ , UpperCAmelCase__): return self.tokenizer(**UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__) elif isinstance(UpperCAmelCase__ , UpperCAmelCase__) and len(UpperCAmelCase__) == 1 and isinstance(inputs[0] , UpperCAmelCase__) and len(inputs[0]) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__) elif isinstance(UpperCAmelCase__ , UpperCAmelCase__): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( '''The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a''' ''' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.''') return self.tokenizer(UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : Tuple) ->Tuple: '''simple docstring''' return self.model(**UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : List[Any]=1 , UpperCAmelCase__ : str=True) ->Dict: '''simple docstring''' if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: A__ = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: A__ = ClassificationFunction.SOFTMAX elif hasattr(self.model.config , '''function_to_apply''') and function_to_apply is None: A__ = self.model.config.function_to_apply else: A__ = ClassificationFunction.NONE A__ = model_outputs['''logits'''][0] A__ = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: A__ = sigmoid(UpperCAmelCase__) elif function_to_apply == ClassificationFunction.SOFTMAX: A__ = softmax(UpperCAmelCase__) elif function_to_apply == ClassificationFunction.NONE: A__ = outputs else: raise ValueError(f"""Unrecognized `function_to_apply` argument: {function_to_apply}""") if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} A__ = [ {'''label''': self.model.config.idalabel[i], '''score''': score.item()} for i, score in enumerate(UpperCAmelCase__) ] if not _legacy: dict_scores.sort(key=lambda UpperCAmelCase__: x["score"] , reverse=UpperCAmelCase__) if top_k is not None: A__ = dict_scores[:top_k] return dict_scores
87
1
import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class UpperCamelCase_ ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = TextToVideoSDPipeline UpperCAmelCase__ = TEXT_TO_IMAGE_PARAMS UpperCAmelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. UpperCAmelCase__ = frozenset( [ '''num_inference_steps''', '''generator''', '''latents''', '''return_dict''', '''callback''', '''callback_steps''', ] ) def SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[Any]: '''simple docstring''' torch.manual_seed(0) A__ = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') , up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') , cross_attention_dim=32 , attention_head_dim=4 , ) A__ = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=UpperCAmelCase__ , set_alpha_to_one=UpperCAmelCase__ , ) torch.manual_seed(0) A__ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0) A__ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='''gelu''' , projection_dim=512 , ) A__ = CLIPTextModel(UpperCAmelCase__) A__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''') A__ = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, } return components def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str=0) ->Any: '''simple docstring''' if str(UpperCAmelCase__).startswith('''mps'''): A__ = torch.manual_seed(UpperCAmelCase__) else: A__ = torch.Generator(device=UpperCAmelCase__).manual_seed(UpperCAmelCase__) A__ = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''pt''', } return inputs def SCREAMING_SNAKE_CASE ( self : int) ->Tuple: '''simple docstring''' A__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator A__ = self.get_dummy_components() A__ = TextToVideoSDPipeline(**UpperCAmelCase__) A__ = sd_pipe.to(UpperCAmelCase__) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__) A__ = self.get_dummy_inputs(UpperCAmelCase__) A__ = '''np''' A__ = sd_pipe(**UpperCAmelCase__).frames A__ = frames[0][-3:, -3:, -1] assert frames[0].shape == (64, 64, 3) A__ = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def SCREAMING_SNAKE_CASE ( self : Dict) ->Dict: '''simple docstring''' self._test_attention_slicing_forward_pass(test_mean_pixel_difference=UpperCAmelCase__ , expected_max_diff=3e-3) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]: '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=UpperCAmelCase__ , expected_max_diff=1e-2) @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''') def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Tuple: '''simple docstring''' pass @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''') def SCREAMING_SNAKE_CASE ( self : Dict) ->Any: '''simple docstring''' pass @unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''') def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[Any]: '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]: '''simple docstring''' return super().test_progress_bar() @slow @skip_mps class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->int: '''simple docstring''' A__ = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy''') A__ = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''') A__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) A__ = pipe.to('''cuda''') A__ = '''Spiderman is surfing''' A__ = torch.Generator(device='''cpu''').manual_seed(0) A__ = pipe(UpperCAmelCase__ , generator=UpperCAmelCase__ , num_inference_steps=25 , output_type='''pt''').frames A__ = video_frames.cpu().numpy() assert np.abs(expected_video - video).mean() < 5e-2 def SCREAMING_SNAKE_CASE ( self : str) ->Optional[Any]: '''simple docstring''' A__ = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy''') A__ = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''') A__ = pipe.to('''cuda''') A__ = '''Spiderman is surfing''' A__ = torch.Generator(device='''cpu''').manual_seed(0) A__ = pipe(UpperCAmelCase__ , generator=UpperCAmelCase__ , num_inference_steps=2 , output_type='''pt''').frames A__ = video_frames.cpu().numpy() assert np.abs(expected_video - video).mean() < 5e-2
87
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _lowerCamelCase : Any = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : int = ["""XLNetTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : int = ["""XLNetTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Union[str, Any] = [ """XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """XLNetForMultipleChoice""", """XLNetForQuestionAnswering""", """XLNetForQuestionAnsweringSimple""", """XLNetForSequenceClassification""", """XLNetForTokenClassification""", """XLNetLMHeadModel""", """XLNetModel""", """XLNetPreTrainedModel""", """load_tf_weights_in_xlnet""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Dict = [ """TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFXLNetForMultipleChoice""", """TFXLNetForQuestionAnsweringSimple""", """TFXLNetForSequenceClassification""", """TFXLNetForTokenClassification""", """TFXLNetLMHeadModel""", """TFXLNetMainLayer""", """TFXLNetModel""", """TFXLNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet import XLNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet_fast import XLNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlnet import ( XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, XLNetForMultipleChoice, XLNetForQuestionAnswering, XLNetForQuestionAnsweringSimple, XLNetForSequenceClassification, XLNetForTokenClassification, XLNetLMHeadModel, XLNetModel, XLNetPreTrainedModel, load_tf_weights_in_xlnet, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlnet import ( TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLNetForMultipleChoice, TFXLNetForQuestionAnsweringSimple, TFXLNetForSequenceClassification, TFXLNetForTokenClassification, TFXLNetLMHeadModel, TFXLNetMainLayer, TFXLNetModel, TFXLNetPreTrainedModel, ) else: import sys _lowerCamelCase : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
87
1
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> List[str]: """simple docstring""" if height >= 1: move_tower(height - 1 , lowercase_ , lowercase_ , lowercase_ ) move_disk(lowercase_ , lowercase_ ) move_tower(height - 1 , lowercase_ , lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str: """simple docstring""" print('''moving disk from''' , lowercase_ , '''to''' , lowercase_ ) def SCREAMING_SNAKE_CASE ( ) -> int: """simple docstring""" A__ = int(input('''Height of hanoi: ''' ).strip() ) move_tower(lowercase_ , '''A''' , '''B''' , '''C''' ) if __name__ == "__main__": main()
87
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCamelCase : Optional[Any] = logging.get_logger(__name__) _lowerCamelCase : Union[str, Any] = { """google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""", """google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""", # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 } class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''mobilenet_v1''' def __init__( self : Optional[int] , UpperCAmelCase__ : Optional[int]=3 , UpperCAmelCase__ : Optional[Any]=224 , UpperCAmelCase__ : Optional[int]=1.0 , UpperCAmelCase__ : Optional[int]=8 , UpperCAmelCase__ : Tuple="relu6" , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Dict=0.999 , UpperCAmelCase__ : str=0.02 , UpperCAmelCase__ : Optional[int]=0.001 , **UpperCAmelCase__ : Dict , ) ->List[str]: '''simple docstring''' super().__init__(**UpperCAmelCase__) if depth_multiplier <= 0: raise ValueError('''depth_multiplier must be greater than zero.''') A__ = num_channels A__ = image_size A__ = depth_multiplier A__ = min_depth A__ = hidden_act A__ = tf_padding A__ = classifier_dropout_prob A__ = initializer_range A__ = layer_norm_eps class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = version.parse('''1.11''' ) @property def SCREAMING_SNAKE_CASE ( self : Any) ->Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict([('''pixel_values''', {0: '''batch'''})]) @property def SCREAMING_SNAKE_CASE ( self : List[str]) ->Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task == "image-classification": return OrderedDict([('''logits''', {0: '''batch'''})]) else: return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})]) @property def SCREAMING_SNAKE_CASE ( self : int) ->float: '''simple docstring''' return 1e-4
87
1
import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase_ : '''simple docstring''' def __init__( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any]=13 , UpperCAmelCase__ : Union[str, Any]=7 , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : Dict=False , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : List[Any]=2 , UpperCAmelCase__ : int=99 , UpperCAmelCase__ : Optional[Any]=0 , UpperCAmelCase__ : Union[str, Any]=32 , UpperCAmelCase__ : Dict=5 , UpperCAmelCase__ : Tuple=4 , UpperCAmelCase__ : str=0.1 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Any=512 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : int=0.02 , UpperCAmelCase__ : Dict=2 , UpperCAmelCase__ : Tuple=4 , UpperCAmelCase__ : List[str]="last" , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : List[Any]=0 , ) ->List[Any]: '''simple docstring''' A__ = parent A__ = batch_size A__ = seq_length A__ = is_training A__ = use_input_lengths A__ = use_token_type_ids A__ = use_labels A__ = gelu_activation A__ = sinusoidal_embeddings A__ = causal A__ = asm A__ = n_langs A__ = vocab_size A__ = n_special A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = type_sequence_label_size A__ = initializer_range A__ = num_labels A__ = num_choices A__ = summary_type A__ = use_proj A__ = scope A__ = bos_token_id def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Optional[Any]: '''simple docstring''' A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) A__ = random_attention_mask([self.batch_size, self.seq_length]) A__ = None if self.use_input_lengths: A__ = ( ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2 ) # small variation of seq_length A__ = None if self.use_token_type_ids: A__ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs) A__ = None A__ = None A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size) A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) A__ = ids_tensor([self.batch_size] , 2).float() A__ = ids_tensor([self.batch_size] , self.num_choices) A__ = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->int: '''simple docstring''' return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , ) ->Union[str, Any]: '''simple docstring''' A__ = XLMModel(config=UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() A__ = model(UpperCAmelCase__ , lengths=UpperCAmelCase__ , langs=UpperCAmelCase__) A__ = model(UpperCAmelCase__ , langs=UpperCAmelCase__) A__ = model(UpperCAmelCase__) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict , ) ->Any: '''simple docstring''' A__ = XLMWithLMHeadModel(UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() A__ = model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__) self.parent.assertEqual(result.loss.shape , ()) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , ) ->List[Any]: '''simple docstring''' A__ = XLMForQuestionAnsweringSimple(UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() A__ = model(UpperCAmelCase__) A__ = model(UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__) A__ = outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , ) ->Optional[Any]: '''simple docstring''' A__ = XLMForQuestionAnswering(UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() A__ = model(UpperCAmelCase__) A__ = model( UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , cls_index=UpperCAmelCase__ , is_impossible=UpperCAmelCase__ , p_mask=UpperCAmelCase__ , ) A__ = model( UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , cls_index=UpperCAmelCase__ , is_impossible=UpperCAmelCase__ , ) ((A__) , ) = result_with_labels.to_tuple() A__ = model(UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__) ((A__) , ) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , ()) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top)) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top)) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top)) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top)) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,)) def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple , ) ->int: '''simple docstring''' A__ = XLMForSequenceClassification(UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() A__ = model(UpperCAmelCase__) A__ = model(UpperCAmelCase__ , labels=UpperCAmelCase__) self.parent.assertEqual(result.loss.shape , ()) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , ) ->str: '''simple docstring''' A__ = self.num_labels A__ = XLMForTokenClassification(UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() A__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , ) ->List[str]: '''simple docstring''' A__ = self.num_choices A__ = XLMForMultipleChoice(config=UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() A__ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() A__ = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() A__ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() A__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict: '''simple docstring''' A__ = self.prepare_config_and_inputs() ( ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ) = config_and_inputs A__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths} return config, inputs_dict @require_torch class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) UpperCAmelCase__ = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable UpperCAmelCase__ = ( { '''feature-extraction''': XLMModel, '''fill-mask''': XLMWithLMHeadModel, '''question-answering''': XLMForQuestionAnsweringSimple, '''text-classification''': XLMForSequenceClassification, '''text-generation''': XLMWithLMHeadModel, '''token-classification''': XLMForTokenClassification, '''zero-shot''': XLMForSequenceClassification, } if is_torch_available() else {} ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple) ->str: '''simple docstring''' if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('''Fast''') ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str]=False) ->Tuple: '''simple docstring''' A__ = super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": A__ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__) A__ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__) return inputs_dict def SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]: '''simple docstring''' A__ = XLMModelTester(self) A__ = ConfigTester(self , config_class=UpperCAmelCase__ , emb_dim=37) def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]: '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[int]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Tuple) ->int: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Tuple) ->List[Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Tuple: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->int: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[str]) ->int: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[str]) ->Union[str, Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Tuple=False , UpperCAmelCase__ : Dict=1) ->int: '''simple docstring''' self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__) self.assertListEqual( [isinstance(UpperCAmelCase__ , UpperCAmelCase__) for iter_attentions in attentions] , [True] * len(UpperCAmelCase__)) self.assertEqual(len(UpperCAmelCase__) , (max_length - min_length) * num_beam_groups) for idx, iter_attentions in enumerate(UpperCAmelCase__): # adds PAD dummy token A__ = min_length + idx + 1 A__ = min_length + idx + 1 A__ = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(UpperCAmelCase__)) def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict=False , UpperCAmelCase__ : Union[str, Any]=1) ->Dict: '''simple docstring''' self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__) self.assertListEqual( [isinstance(UpperCAmelCase__ , UpperCAmelCase__) for iter_hidden_states in hidden_states] , [True] * len(UpperCAmelCase__) , ) self.assertEqual(len(UpperCAmelCase__) , (max_length - min_length) * num_beam_groups) for idx, iter_hidden_states in enumerate(UpperCAmelCase__): # adds PAD dummy token A__ = min_length + idx + 1 A__ = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(UpperCAmelCase__) , ) pass @slow def SCREAMING_SNAKE_CASE ( self : List[str]) ->int: '''simple docstring''' for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ = XLMModel.from_pretrained(UpperCAmelCase__) self.assertIsNotNone(UpperCAmelCase__) @require_torch class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE ( self : str) ->Dict: '''simple docstring''' A__ = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''') model.to(UpperCAmelCase__) A__ = torch.tensor([[14, 447]] , dtype=torch.long , device=UpperCAmelCase__) # the president A__ = [ 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference A__ = model.generate(UpperCAmelCase__ , do_sample=UpperCAmelCase__) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , UpperCAmelCase__)
87
import unittest from pathlib import Path from shutil import copyfile from transformers import SPIECE_UNDERLINE, is_sentencepiece_available from transformers.models.speech_to_text import SpeechaTextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin _lowerCamelCase : Dict = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_sentencepiece_available(): import sentencepiece as sp _lowerCamelCase : str = 5 _lowerCamelCase : int = 10 @require_sentencepiece @require_tokenizers class UpperCamelCase_ ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = SpeechaTextTokenizer UpperCAmelCase__ = False UpperCAmelCase__ = True def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[str]: '''simple docstring''' super().setUp() A__ = sp.SentencePieceProcessor() spm_model.Load(UpperCAmelCase__) A__ = ['''<s>''', '''<pad>''', '''</s>''', '''<unk>'''] vocab += [spm_model.IdToPiece(id_) for id_ in range(len(UpperCAmelCase__))] A__ = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__)))) A__ = Path(self.tmpdirname) save_json(UpperCAmelCase__ , save_dir / VOCAB_FILES_NAMES['''vocab_file''']) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(UpperCAmelCase__ , save_dir / VOCAB_FILES_NAMES['''spm_file''']) A__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname) tokenizer.save_pretrained(self.tmpdirname) def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[int]: '''simple docstring''' A__ = '''<pad>''' A__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__) , UpperCAmelCase__) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__) , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any: '''simple docstring''' A__ = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , '''<s>''') self.assertEqual(vocab_keys[1] , '''<pad>''') self.assertEqual(vocab_keys[-1] , '''j''') self.assertEqual(len(UpperCAmelCase__) , 1_001) def SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[str]: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1_001) def SCREAMING_SNAKE_CASE ( self : int) ->List[str]: '''simple docstring''' A__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname) A__ = tokenizer.tokenize('''This is a test''') self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase__) , [289, 50, 14, 174, 386] , ) A__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''') self.assertListEqual( UpperCAmelCase__ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , ) A__ = tokenizer.convert_tokens_to_ids(UpperCAmelCase__) self.assertListEqual(UpperCAmelCase__ , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8]) A__ = tokenizer.convert_ids_to_tokens(UpperCAmelCase__) self.assertListEqual( UpperCAmelCase__ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , ) @slow def SCREAMING_SNAKE_CASE ( self : int) ->List[Any]: '''simple docstring''' A__ = {'''input_ids''': [[3_791, 797, 31, 11, 64, 797, 31, 2_429, 433, 12, 1_176, 12, 20, 786, 915, 142, 2_413, 240, 37, 3_238, 797, 31, 11, 35, 93, 915, 142, 2_413, 240, 37, 5_540, 567, 1_276, 93, 37, 610, 40, 62, 455, 657, 1_042, 123, 780, 177, 37, 309, 241, 1_298, 514, 20, 292, 2_737, 114, 2_469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3_388, 511, 459, 4, 3_555, 40, 321, 302, 705, 4, 3_388, 511, 583, 326, 5, 5, 5, 62, 3_310, 560, 177, 2_680, 217, 1_508, 32, 31, 853, 418, 64, 583, 511, 1_605, 62, 35, 93, 560, 177, 2_680, 217, 1_508, 1_521, 64, 583, 511, 519, 62, 20, 1_515, 764, 20, 149, 261, 5_625, 7_972, 20, 5_540, 567, 1_276, 93, 3_925, 1_675, 11, 15, 802, 7_972, 576, 217, 1_508, 11, 35, 93, 1_253, 2_441, 15, 289, 652, 31, 416, 321, 3_842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2_681, 1_153, 3_434, 20, 5_540, 37, 567, 126, 1_253, 2_441, 3_376, 449, 210, 431, 1_563, 177, 767, 5_540, 11, 1_203, 472, 11, 2_953, 685, 285, 364, 706, 1_153, 20, 6_799, 20, 2_869, 20, 4_464, 126, 40, 2_429, 20, 1_040, 866, 2_664, 418, 20, 318, 20, 1_726, 186, 20, 265, 522, 35, 93, 2_191, 4_634, 20, 1_040, 12, 6_799, 15, 228, 2_356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_575, 2_666, 684, 1_582, 1_176, 12, 627, 149, 619, 20, 4_902, 563, 11, 20, 149, 261, 3_420, 2_356, 174, 142, 4_714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase__ , model_name='''facebook/s2t-small-mustc-en-de-st''' , revision='''a14f04cf0776c02f62a8cb800cf7909e15ea23ad''' , ) @require_sentencepiece class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = '''valhalla/s2t_mustc_multilinguial_medium''' UpperCAmelCase__ = '''C\'est trop cool''' UpperCAmelCase__ = '''Esto es genial''' @classmethod def SCREAMING_SNAKE_CASE ( cls : Dict) ->Dict: '''simple docstring''' A__ = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name) return cls def SCREAMING_SNAKE_CASE ( self : str) ->Dict: '''simple docstring''' self.assertEqual(self.tokenizer.lang_code_to_id['''pt'''] , 4) self.assertEqual(self.tokenizer.lang_code_to_id['''ru'''] , 6) self.assertEqual(self.tokenizer.lang_code_to_id['''it'''] , 9) self.assertEqual(self.tokenizer.lang_code_to_id['''de'''] , 11) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Dict: '''simple docstring''' self.assertEqual(self.tokenizer.vocab_size , 10_000) def SCREAMING_SNAKE_CASE ( self : Dict) ->Union[str, Any]: '''simple docstring''' self.assertIn(UpperCAmelCase__ , self.tokenizer.all_special_ids) A__ = [ES_CODE, 4, 1_601, 47, 7_647, 2] A__ = self.tokenizer.decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__) A__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCAmelCase__) self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__) self.assertNotIn(self.tokenizer.eos_token , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : int) ->str: '''simple docstring''' A__ = '''fr''' A__ = self.tokenizer(self.french_text).input_ids self.assertEqual(encoded[0] , UpperCAmelCase__) self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->int: '''simple docstring''' A__ = '''fr''' self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE]) A__ = '''es''' self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE])
87
1
import argparse import os import shutil import torch from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Any: """simple docstring""" A__ = args.pruning_method A__ = args.threshold A__ = args.model_name_or_path.rstrip('''/''' ) A__ = args.target_model_path print(f"""Load fine-pruned model from {model_name_or_path}""" ) A__ = torch.load(os.path.join(lowercase_ , '''pytorch_model.bin''' ) ) A__ = {} for name, tensor in model.items(): if "embeddings" in name or "LayerNorm" in name or "pooler" in name: A__ = tensor print(f"""Copied layer {name}""" ) elif "classifier" in name or "qa_output" in name: A__ = tensor print(f"""Copied layer {name}""" ) elif "bias" in name: A__ = tensor print(f"""Copied layer {name}""" ) else: if pruning_method == "magnitude": A__ = MagnitudeBinarizer.apply(inputs=lowercase_ , threshold=lowercase_ ) A__ = tensor * mask print(f"""Pruned layer {name}""" ) elif pruning_method == "topK": if "mask_scores" in name: continue A__ = name[:-6] A__ = model[f"""{prefix_}mask_scores"""] A__ = TopKBinarizer.apply(lowercase_ , lowercase_ ) A__ = tensor * mask print(f"""Pruned layer {name}""" ) elif pruning_method == "sigmoied_threshold": if "mask_scores" in name: continue A__ = name[:-6] A__ = model[f"""{prefix_}mask_scores"""] A__ = ThresholdBinarizer.apply(lowercase_ , lowercase_ , lowercase_ ) A__ = tensor * mask print(f"""Pruned layer {name}""" ) elif pruning_method == "l0": if "mask_scores" in name: continue A__ = name[:-6] A__ = model[f"""{prefix_}mask_scores"""] A__ , A__ = -0.1, 1.1 A__ = torch.sigmoid(lowercase_ ) A__ = s * (r - l) + l A__ = s_bar.clamp(min=0.0 , max=1.0 ) A__ = tensor * mask print(f"""Pruned layer {name}""" ) else: raise ValueError('''Unknown pruning method''' ) if target_model_path is None: A__ = os.path.join( os.path.dirname(lowercase_ ) , f"""bertarized_{os.path.basename(lowercase_ )}""" ) if not os.path.isdir(lowercase_ ): shutil.copytree(lowercase_ , lowercase_ ) print(f"""\nCreated folder {target_model_path}""" ) torch.save(lowercase_ , os.path.join(lowercase_ , '''pytorch_model.bin''' ) ) print('''\nPruned model saved! See you later!''' ) if __name__ == "__main__": _lowerCamelCase : Optional[Any] = argparse.ArgumentParser() parser.add_argument( """--pruning_method""", choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""], type=str, required=True, help=( """Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,""" """ sigmoied_threshold = Soft movement pruning)""" ), ) parser.add_argument( """--threshold""", type=float, required=False, help=( """For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.""" """For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.""" """Not needed for `l0`""" ), ) parser.add_argument( """--model_name_or_path""", type=str, required=True, help="""Folder containing the model that was previously fine-pruned""", ) parser.add_argument( """--target_model_path""", default=None, type=str, required=False, help="""Folder containing the model that was previously fine-pruned""", ) _lowerCamelCase : int = parser.parse_args() main(args)
87
from __future__ import annotations import requests def SCREAMING_SNAKE_CASE ( lowercase_ ) -> dict: """simple docstring""" A__ = f"""https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty""" return requests.get(lowercase_ ).json() def SCREAMING_SNAKE_CASE ( lowercase_ = 10 ) -> list[dict]: """simple docstring""" A__ = '''https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty''' A__ = requests.get(lowercase_ ).json()[:max_stories] return [get_hackernews_story(lowercase_ ) for story_id in story_ids] def SCREAMING_SNAKE_CASE ( lowercase_ = 10 ) -> str: """simple docstring""" A__ = hackernews_top_stories(lowercase_ ) return "\n".join('''* [{title}]({url})'''.format(**lowercase_ ) for story in stories ) if __name__ == "__main__": print(hackernews_top_stories_as_markdown())
87
1
from typing import TYPE_CHECKING from ...utils import _LazyModule _lowerCamelCase : Union[str, Any] = {"""tokenization_bertweet""": ["""BertweetTokenizer"""]} if TYPE_CHECKING: from .tokenization_bertweet import BertweetTokenizer else: import sys _lowerCamelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
87
import os import sys import warnings from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen from ..table import array_cast from ..utils.file_utils import is_local_path from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: import PIL.Image from .features import FeatureType _lowerCamelCase : Optional[List[str]] = None _lowerCamelCase : int = """<""" if sys.byteorder == """little""" else """>""" # Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image _lowerCamelCase : Union[str, Any] = [ np.dtype("""|b1"""), np.dtype("""|u1"""), np.dtype("""<u2"""), np.dtype(""">u2"""), np.dtype("""<i2"""), np.dtype(""">i2"""), np.dtype("""<u4"""), np.dtype(""">u4"""), np.dtype("""<i4"""), np.dtype(""">i4"""), np.dtype("""<f4"""), np.dtype(""">f4"""), np.dtype("""<f8"""), np.dtype(""">f8"""), ] @dataclass class UpperCamelCase_ : '''simple docstring''' UpperCAmelCase__ = True UpperCAmelCase__ = None # Automatically constructed UpperCAmelCase__ = "PIL.Image.Image" UpperCAmelCase__ = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} ) UpperCAmelCase__ = field(default='''Image''' , init=UpperCAmelCase__ , repr=UpperCAmelCase__ ) def __call__( self : List[str]) ->List[str]: '''simple docstring''' return self.pa_type def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) ->dict: '''simple docstring''' if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support encoding images, please install \'Pillow\'.''') if isinstance(UpperCAmelCase__ , UpperCAmelCase__): A__ = np.array(UpperCAmelCase__) if isinstance(UpperCAmelCase__ , UpperCAmelCase__): return {"path": value, "bytes": None} elif isinstance(UpperCAmelCase__ , UpperCAmelCase__): return {"path": None, "bytes": value} elif isinstance(UpperCAmelCase__ , np.ndarray): # convert the image array to PNG/TIFF bytes return encode_np_array(UpperCAmelCase__) elif isinstance(UpperCAmelCase__ , PIL.Image.Image): # convert the PIL image to bytes (default format is PNG/TIFF) return encode_pil_image(UpperCAmelCase__) elif value.get('''path''') is not None and os.path.isfile(value['''path''']): # we set "bytes": None to not duplicate the data if they're already available locally return {"bytes": None, "path": value.get('''path''')} elif value.get('''bytes''') is not None or value.get('''path''') is not None: # store the image bytes, and path is used to infer the image format using the file extension return {"bytes": value.get('''bytes'''), "path": value.get('''path''')} else: raise ValueError( f"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""") def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : dict , UpperCAmelCase__ : str=None) ->"PIL.Image.Image": '''simple docstring''' if not self.decode: raise RuntimeError('''Decoding is disabled for this feature. Please use Image(decode=True) instead.''') if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support decoding images, please install \'Pillow\'.''') if token_per_repo_id is None: A__ = {} A__ , A__ = value['''path'''], value['''bytes'''] if bytes_ is None: if path is None: raise ValueError(f"""An image should have one of 'path' or 'bytes' but both are None in {value}.""") else: if is_local_path(UpperCAmelCase__): A__ = PIL.Image.open(UpperCAmelCase__) else: A__ = path.split('''::''')[-1] try: A__ = string_to_dict(UpperCAmelCase__ , config.HUB_DATASETS_URL)['''repo_id'''] A__ = token_per_repo_id.get(UpperCAmelCase__) except ValueError: A__ = None with xopen(UpperCAmelCase__ , '''rb''' , use_auth_token=UpperCAmelCase__) as f: A__ = BytesIO(f.read()) A__ = PIL.Image.open(bytes_) else: A__ = PIL.Image.open(BytesIO(bytes_)) image.load() # to avoid "Too many open files" errors return image def SCREAMING_SNAKE_CASE ( self : Dict) ->Union["FeatureType", Dict[str, "FeatureType"]]: '''simple docstring''' from .features import Value return ( self if self.decode else { "bytes": Value('''binary'''), "path": Value('''string'''), } ) def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Union[pa.StringArray, pa.StructArray, pa.ListArray]) ->pa.StructArray: '''simple docstring''' if pa.types.is_string(storage.type): A__ = pa.array([None] * len(UpperCAmelCase__) , type=pa.binary()) A__ = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null()) elif pa.types.is_binary(storage.type): A__ = pa.array([None] * len(UpperCAmelCase__) , type=pa.string()) A__ = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null()) elif pa.types.is_struct(storage.type): if storage.type.get_field_index('''bytes''') >= 0: A__ = storage.field('''bytes''') else: A__ = pa.array([None] * len(UpperCAmelCase__) , type=pa.binary()) if storage.type.get_field_index('''path''') >= 0: A__ = storage.field('''path''') else: A__ = pa.array([None] * len(UpperCAmelCase__) , type=pa.string()) A__ = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null()) elif pa.types.is_list(storage.type): A__ = pa.array( [encode_np_array(np.array(UpperCAmelCase__))['''bytes'''] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , ) A__ = pa.array([None] * len(UpperCAmelCase__) , type=pa.string()) A__ = pa.StructArray.from_arrays( [bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null()) return array_cast(UpperCAmelCase__ , self.pa_type) def SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase__ : pa.StructArray) ->pa.StructArray: '''simple docstring''' @no_op_if_value_is_null def path_to_bytes(UpperCAmelCase__ : Dict): with xopen(UpperCAmelCase__ , '''rb''') as f: A__ = f.read() return bytes_ A__ = pa.array( [ (path_to_bytes(x['''path''']) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) A__ = pa.array( [os.path.basename(UpperCAmelCase__) if path is not None else None for path in storage.field('''path''').to_pylist()] , type=pa.string() , ) A__ = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null()) return array_cast(UpperCAmelCase__ , self.pa_type) def SCREAMING_SNAKE_CASE ( ) -> List[str]: """simple docstring""" if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support encoding images, please install \'Pillow\'.''' ) global _IMAGE_COMPRESSION_FORMATS if _IMAGE_COMPRESSION_FORMATS is None: PIL.Image.init() A__ = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) ) return _IMAGE_COMPRESSION_FORMATS def SCREAMING_SNAKE_CASE ( lowercase_ ) -> bytes: """simple docstring""" A__ = BytesIO() if image.format in list_image_compression_formats(): A__ = image.format else: A__ = '''PNG''' if image.mode in ['''1''', '''L''', '''LA''', '''RGB''', '''RGBA'''] else '''TIFF''' image.save(lowercase_ , format=lowercase_ ) return buffer.getvalue() def SCREAMING_SNAKE_CASE ( lowercase_ ) -> dict: """simple docstring""" if hasattr(lowercase_ , '''filename''' ) and image.filename != "": return {"path": image.filename, "bytes": None} else: return {"path": None, "bytes": image_to_bytes(lowercase_ )} def SCREAMING_SNAKE_CASE ( lowercase_ ) -> dict: """simple docstring""" if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support encoding images, please install \'Pillow\'.''' ) A__ = array.dtype A__ = dtype.byteorder if dtype.byteorder != '''=''' else _NATIVE_BYTEORDER A__ = dtype.kind A__ = dtype.itemsize A__ = None # Multi-channel array case (only np.dtype("|u1") is allowed) if array.shape[2:]: A__ = np.dtype('''|u1''' ) if dtype_kind not in ["u", "i"]: raise TypeError( f"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.""" ) if dtype is not dest_dtype: warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" ) # Exact match elif dtype in _VALID_IMAGE_ARRAY_DTPYES: A__ = dtype else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually) while dtype_itemsize >= 1: A__ = dtype_byteorder + dtype_kind + str(lowercase_ ) A__ = np.dtype(lowercase_ ) if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES: warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" ) break else: dtype_itemsize //= 2 if dest_dtype is None: raise TypeError( f"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}""" ) A__ = PIL.Image.fromarray(array.astype(lowercase_ ) ) return {"path": None, "bytes": image_to_bytes(lowercase_ )} def SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[dict]: """simple docstring""" if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support encoding images, please install \'Pillow\'.''' ) if objs: A__ , A__ = first_non_null_value(lowercase_ ) if isinstance(lowercase_ , lowercase_ ): return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs] if isinstance(lowercase_ , np.ndarray ): A__ = no_op_if_value_is_null(lowercase_ ) return [obj_to_image_dict_func(lowercase_ ) for obj in objs] elif isinstance(lowercase_ , PIL.Image.Image ): A__ = no_op_if_value_is_null(lowercase_ ) return [obj_to_image_dict_func(lowercase_ ) for obj in objs] else: return objs else: return objs
87
1
import argparse import os from . import ( ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BART_PRETRAINED_MODEL_ARCHIVE_LIST, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, BartConfig, BertConfig, CamembertConfig, CTRLConfig, DistilBertConfig, DPRConfig, ElectraConfig, FlaubertConfig, GPTaConfig, LayoutLMConfig, LxmertConfig, OpenAIGPTConfig, RobertaConfig, TaConfig, TFAlbertForPreTraining, TFBartForConditionalGeneration, TFBartForSequenceClassification, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFCamembertForMaskedLM, TFCTRLLMHeadModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, TFElectraForPreTraining, TFFlaubertWithLMHeadModel, TFGPTaLMHeadModel, TFLayoutLMForMaskedLM, TFLxmertForPreTraining, TFLxmertVisualFeatureEncoder, TFOpenAIGPTLMHeadModel, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForSequenceClassification, TFTaForConditionalGeneration, TFTransfoXLLMHeadModel, TFWavaVecaModel, TFXLMRobertaForMaskedLM, TFXLMWithLMHeadModel, TFXLNetLMHeadModel, TransfoXLConfig, WavaVecaConfig, WavaVecaModel, XLMConfig, XLMRobertaConfig, XLNetConfig, is_torch_available, load_pytorch_checkpoint_in_tfa_model, ) from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging if is_torch_available(): import numpy as np import torch from . import ( AlbertForPreTraining, BartForConditionalGeneration, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, CamembertForMaskedLM, CTRLLMHeadModel, DistilBertForMaskedLM, DistilBertForQuestionAnswering, DPRContextEncoder, DPRQuestionEncoder, DPRReader, ElectraForPreTraining, FlaubertWithLMHeadModel, GPTaLMHeadModel, LayoutLMForMaskedLM, LxmertForPreTraining, LxmertVisualFeatureEncoder, OpenAIGPTLMHeadModel, RobertaForMaskedLM, RobertaForSequenceClassification, TaForConditionalGeneration, TransfoXLLMHeadModel, XLMRobertaForMaskedLM, XLMWithLMHeadModel, XLNetLMHeadModel, ) logging.set_verbosity_info() _lowerCamelCase : List[str] = { """bart""": ( BartConfig, TFBartForConditionalGeneration, TFBartForSequenceClassification, BartForConditionalGeneration, BART_PRETRAINED_MODEL_ARCHIVE_LIST, ), """bert""": ( BertConfig, TFBertForPreTraining, BertForPreTraining, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-large-uncased-whole-word-masking-finetuned-squad""": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-large-cased-whole-word-masking-finetuned-squad""": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-base-cased-finetuned-mrpc""": ( BertConfig, TFBertForSequenceClassification, BertForSequenceClassification, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """dpr""": ( DPRConfig, TFDPRQuestionEncoder, TFDPRContextEncoder, TFDPRReader, DPRQuestionEncoder, DPRContextEncoder, DPRReader, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ), """gpt2""": ( GPTaConfig, TFGPTaLMHeadModel, GPTaLMHeadModel, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlnet""": ( XLNetConfig, TFXLNetLMHeadModel, XLNetLMHeadModel, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlm""": ( XLMConfig, TFXLMWithLMHeadModel, XLMWithLMHeadModel, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlm-roberta""": ( XLMRobertaConfig, TFXLMRobertaForMaskedLM, XLMRobertaForMaskedLM, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """transfo-xl""": ( TransfoXLConfig, TFTransfoXLLMHeadModel, TransfoXLLMHeadModel, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """openai-gpt""": ( OpenAIGPTConfig, TFOpenAIGPTLMHeadModel, OpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """roberta""": ( RobertaConfig, TFRobertaForCausalLM, TFRobertaForMaskedLM, RobertaForMaskedLM, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """layoutlm""": ( LayoutLMConfig, TFLayoutLMForMaskedLM, LayoutLMForMaskedLM, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, ), """roberta-large-mnli""": ( RobertaConfig, TFRobertaForSequenceClassification, RobertaForSequenceClassification, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """camembert""": ( CamembertConfig, TFCamembertForMaskedLM, CamembertForMaskedLM, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """flaubert""": ( FlaubertConfig, TFFlaubertWithLMHeadModel, FlaubertWithLMHeadModel, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """distilbert""": ( DistilBertConfig, TFDistilBertForMaskedLM, DistilBertForMaskedLM, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """distilbert-base-distilled-squad""": ( DistilBertConfig, TFDistilBertForQuestionAnswering, DistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """lxmert""": ( LxmertConfig, TFLxmertForPreTraining, LxmertForPreTraining, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """lxmert-visual-feature-encoder""": ( LxmertConfig, TFLxmertVisualFeatureEncoder, LxmertVisualFeatureEncoder, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """ctrl""": ( CTRLConfig, TFCTRLLMHeadModel, CTRLLMHeadModel, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """albert""": ( AlbertConfig, TFAlbertForPreTraining, AlbertForPreTraining, ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """t5""": ( TaConfig, TFTaForConditionalGeneration, TaForConditionalGeneration, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """electra""": ( ElectraConfig, TFElectraForPreTraining, ElectraForPreTraining, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """wav2vec2""": ( WavaVecaConfig, TFWavaVecaModel, WavaVecaModel, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), } def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_=False , lowercase_=True ) -> List[Any]: """simple docstring""" if model_type not in MODEL_CLASSES: raise ValueError(f"""Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.""" ) A__ , A__ , A__ , A__ = MODEL_CLASSES[model_type] # Initialise TF model if config_file in aws_config_map: A__ = cached_file(lowercase_ , lowercase_ , force_download=not use_cached_models ) A__ = config_class.from_json_file(lowercase_ ) A__ = True A__ = True print(f"""Building TensorFlow model from configuration: {config}""" ) A__ = model_class(lowercase_ ) # Load weights from tf checkpoint if pytorch_checkpoint_path in aws_config_map.keys(): A__ = cached_file( lowercase_ , lowercase_ , force_download=not use_cached_models ) # Load PyTorch checkpoint in tf2 model: A__ = load_pytorch_checkpoint_in_tfa_model(lowercase_ , lowercase_ ) if compare_with_pt_model: A__ = tf_model(tf_model.dummy_inputs , training=lowercase_ ) # build the network A__ = torch.load(lowercase_ , map_location='''cpu''' ) A__ = pt_model_class.from_pretrained( pretrained_model_name_or_path=lowercase_ , config=lowercase_ , state_dict=lowercase_ ) with torch.no_grad(): A__ = pt_model(**pt_model.dummy_inputs ) A__ = pto[0].numpy() A__ = tfo[0].numpy() A__ = np.amax(np.abs(np_pt - np_tf ) ) print(f"""Max absolute difference between models outputs {diff}""" ) assert diff <= 2E-2, f"""Error, model absolute difference is >2e-2: {diff}""" # Save pytorch-model print(f"""Save TensorFlow model to {tf_dump_path}""" ) tf_model.save_weights(lowercase_ , save_format='''h5''' ) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=False , lowercase_=False , lowercase_=False , lowercase_=False , ) -> int: """simple docstring""" if args_model_type is None: A__ = list(MODEL_CLASSES.keys() ) else: A__ = [args_model_type] for j, model_type in enumerate(lowercase_ , start=1 ): print('''=''' * 100 ) print(f""" Converting model type {j}/{len(lowercase_ )}: {model_type}""" ) print('''=''' * 100 ) if model_type not in MODEL_CLASSES: raise ValueError(f"""Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.""" ) A__ , A__ , A__ , A__ , A__ = MODEL_CLASSES[model_type] if model_shortcut_names_or_path is None: A__ = list(aws_model_maps.keys() ) if config_shortcut_names_or_path is None: A__ = model_shortcut_names_or_path for i, (model_shortcut_name, config_shortcut_name) in enumerate( zip(lowercase_ , lowercase_ ) , start=1 ): print('''-''' * 100 ) if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name: if not only_convert_finetuned_models: print(f""" Skipping finetuned checkpoint {model_shortcut_name}""" ) continue A__ = model_shortcut_name elif only_convert_finetuned_models: print(f""" Skipping not finetuned checkpoint {model_shortcut_name}""" ) continue print( f""" Converting checkpoint {i}/{len(lowercase_ )}: {model_shortcut_name} - model_type {model_type}""" ) print('''-''' * 100 ) if config_shortcut_name in aws_config_map: A__ = cached_file(lowercase_ , lowercase_ , force_download=not use_cached_models ) else: A__ = config_shortcut_name if model_shortcut_name in aws_model_maps: A__ = cached_file(lowercase_ , lowercase_ , force_download=not use_cached_models ) else: A__ = model_shortcut_name if os.path.isfile(lowercase_ ): A__ = '''converted_model''' convert_pt_checkpoint_to_tf( model_type=lowercase_ , pytorch_checkpoint_path=lowercase_ , config_file=lowercase_ , tf_dump_path=os.path.join(lowercase_ , model_shortcut_name + '''-tf_model.h5''' ) , compare_with_pt_model=lowercase_ , ) if remove_cached_files: os.remove(lowercase_ ) os.remove(lowercase_ ) if __name__ == "__main__": _lowerCamelCase : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file.""" ) parser.add_argument( """--model_type""", default=None, type=str, help=( F'''Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and ''' """convert all the models from AWS.""" ), ) parser.add_argument( """--pytorch_checkpoint_path""", default=None, type=str, help=( """Path to the PyTorch checkpoint path or shortcut name to download from AWS. """ """If not given, will download and convert all the checkpoints from AWS.""" ), ) parser.add_argument( """--config_file""", default=None, type=str, help=( """The config json file corresponding to the pre-trained model. \n""" """This specifies the model architecture. If not given and """ """--pytorch_checkpoint_path is not given or is a shortcut name """ """use the configuration associated to the shortcut name on the AWS""" ), ) parser.add_argument( """--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions.""" ) parser.add_argument( """--use_cached_models""", action="""store_true""", help="""Use cached models if possible instead of updating to latest checkpoint versions.""", ) parser.add_argument( """--remove_cached_files""", action="""store_true""", help="""Remove pytorch models after conversion (save memory when converting in batches).""", ) parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""") _lowerCamelCase : List[Any] = parser.parse_args() # if args.pytorch_checkpoint_path is not None: # convert_pt_checkpoint_to_tf(args.model_type.lower(), # args.pytorch_checkpoint_path, # args.config_file if args.config_file is not None else args.pytorch_checkpoint_path, # args.tf_dump_path, # compare_with_pt_model=args.compare_with_pt_model, # use_cached_models=args.use_cached_models) # else: convert_all_pt_checkpoints_to_tf( args.model_type.lower() if args.model_type is not None else None, args.tf_dump_path, model_shortcut_names_or_path=[args.pytorch_checkpoint_path] if args.pytorch_checkpoint_path is not None else None, config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None, compare_with_pt_model=args.compare_with_pt_model, use_cached_models=args.use_cached_models, remove_cached_files=args.remove_cached_files, only_convert_finetuned_models=args.only_convert_finetuned_models, )
87
from __future__ import annotations import unittest from transformers import MobileBertConfig, is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_MODEL_FOR_PRETRAINING_MAPPING, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertModel, ) @require_tf class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = ( ( TFMobileBertModel, TFMobileBertForMaskedLM, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertForMultipleChoice, ) if is_tf_available() else () ) UpperCAmelCase__ = ( { '''feature-extraction''': TFMobileBertModel, '''fill-mask''': TFMobileBertForMaskedLM, '''question-answering''': TFMobileBertForQuestionAnswering, '''text-classification''': TFMobileBertForSequenceClassification, '''token-classification''': TFMobileBertForTokenClassification, '''zero-shot''': TFMobileBertForSequenceClassification, } if is_tf_available() else {} ) UpperCAmelCase__ = False UpperCAmelCase__ = False def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str=False) ->Optional[Any]: '''simple docstring''' A__ = super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__) if return_labels: if model_class in get_values(UpperCAmelCase__): A__ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa) return inputs_dict class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : int=13 , UpperCAmelCase__ : str=7 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : str=99 , UpperCAmelCase__ : List[str]=32 , UpperCAmelCase__ : Optional[int]=32 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : List[str]=4 , UpperCAmelCase__ : Optional[Any]=37 , UpperCAmelCase__ : Optional[int]="gelu" , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : List[Any]=512 , UpperCAmelCase__ : Tuple=16 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : Dict=0.02 , UpperCAmelCase__ : int=3 , UpperCAmelCase__ : List[str]=4 , UpperCAmelCase__ : Tuple=None , ) ->Any: '''simple docstring''' A__ = parent A__ = batch_size A__ = seq_length A__ = is_training A__ = use_input_mask A__ = use_token_type_ids A__ = use_labels A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = type_vocab_size A__ = type_sequence_label_size A__ = initializer_range A__ = num_labels A__ = num_choices A__ = scope A__ = embedding_size def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Tuple: '''simple docstring''' A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) A__ = None if self.use_input_mask: A__ = random_attention_mask([self.batch_size, self.seq_length]) A__ = None if self.use_token_type_ids: A__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) A__ = None A__ = None A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size) A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) A__ = ids_tensor([self.batch_size] , self.num_choices) A__ = MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[Any]) ->Any: '''simple docstring''' A__ = TFMobileBertModel(config=UpperCAmelCase__) A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A__ = model(UpperCAmelCase__) A__ = [input_ids, input_mask] A__ = model(UpperCAmelCase__) A__ = model(UpperCAmelCase__) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size)) def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple) ->Optional[Any]: '''simple docstring''' A__ = TFMobileBertForMaskedLM(config=UpperCAmelCase__) A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A__ = model(UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any]) ->int: '''simple docstring''' A__ = TFMobileBertForNextSentencePrediction(config=UpperCAmelCase__) A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A__ = model(UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2)) def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int) ->List[Any]: '''simple docstring''' A__ = TFMobileBertForPreTraining(config=UpperCAmelCase__) A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A__ = model(UpperCAmelCase__) self.parent.assertEqual( result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2)) def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple) ->Dict: '''simple docstring''' A__ = self.num_labels A__ = TFMobileBertForSequenceClassification(config=UpperCAmelCase__) A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A__ = model(UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : int) ->Dict: '''simple docstring''' A__ = self.num_choices A__ = TFMobileBertForMultipleChoice(config=UpperCAmelCase__) A__ = tf.tile(tf.expand_dims(UpperCAmelCase__ , 1) , (1, self.num_choices, 1)) A__ = tf.tile(tf.expand_dims(UpperCAmelCase__ , 1) , (1, self.num_choices, 1)) A__ = tf.tile(tf.expand_dims(UpperCAmelCase__ , 1) , (1, self.num_choices, 1)) A__ = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, '''token_type_ids''': multiple_choice_token_type_ids, } A__ = model(UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int]) ->int: '''simple docstring''' A__ = self.num_labels A__ = TFMobileBertForTokenClassification(config=UpperCAmelCase__) A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A__ = model(UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any]) ->Union[str, Any]: '''simple docstring''' A__ = TFMobileBertForQuestionAnswering(config=UpperCAmelCase__) A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A__ = model(UpperCAmelCase__) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def SCREAMING_SNAKE_CASE ( self : Any) ->str: '''simple docstring''' A__ = self.prepare_config_and_inputs() ( ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ) = config_and_inputs A__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict def SCREAMING_SNAKE_CASE ( self : Any) ->Union[str, Any]: '''simple docstring''' A__ = TFMobileBertModelTest.TFMobileBertModelTester(self) A__ = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37) def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[int]: '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[str]) ->Tuple: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Tuple) ->Dict: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[int]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Tuple) ->List[str]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*UpperCAmelCase__) @slow def SCREAMING_SNAKE_CASE ( self : str) ->List[Any]: '''simple docstring''' for model_name in ["google/mobilebert-uncased"]: A__ = TFMobileBertModel.from_pretrained(UpperCAmelCase__) self.assertIsNotNone(UpperCAmelCase__) @require_tf class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Any: '''simple docstring''' A__ = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''') A__ = tf.constant([[0, 1, 2, 3, 4, 5]]) A__ = model(UpperCAmelCase__)[0] A__ = [1, 6, 30_522] self.assertEqual(output.shape , UpperCAmelCase__) A__ = tf.constant( [ [ [-4.5919547, -9.248295, -9.645256], [-6.7306175, -6.440284, -6.6052837], [-7.2743506, -6.7847915, -6.024673], ] ]) tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase__ , atol=1e-4)
87
1
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Union[str, Any]: """simple docstring""" A__ = len(lowercase_ ) while cur > 1: # Find the maximum number in arr A__ = arr.index(max(arr[0:cur] ) ) # Reverse from 0 to mi A__ = arr[mi::-1] + arr[mi + 1 : len(lowercase_ )] # Reverse whole list A__ = arr[cur - 1 :: -1] + arr[cur : len(lowercase_ )] cur -= 1 return arr if __name__ == "__main__": _lowerCamelCase : Optional[Any] = input("""Enter numbers separated by a comma:\n""").strip() _lowerCamelCase : Any = [int(item) for item in user_input.split(""",""")] print(pancake_sort(unsorted))
87
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' def __init__( self : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int=13 , UpperCAmelCase__ : Union[str, Any]=3 , UpperCAmelCase__ : str=224 , UpperCAmelCase__ : str=30 , UpperCAmelCase__ : Tuple=400 , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Union[str, Any]=[0.5, 0.5, 0.5] , UpperCAmelCase__ : Tuple=[0.5, 0.5, 0.5] , ) ->str: '''simple docstring''' A__ = size if size is not None else {'''height''': 18, '''width''': 18} A__ = parent A__ = batch_size A__ = num_channels A__ = image_size A__ = min_resolution A__ = max_resolution A__ = do_resize A__ = size A__ = do_normalize A__ = image_mean A__ = image_std def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[int]: '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class UpperCamelCase_ ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = ViTImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE ( self : List[str]) ->str: '''simple docstring''' A__ = EfficientFormerImageProcessorTester(self) @property def SCREAMING_SNAKE_CASE ( self : Dict) ->int: '''simple docstring''' return self.image_proc_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(UpperCAmelCase__ , '''image_mean''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''image_std''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''do_normalize''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''do_resize''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''size''')) def SCREAMING_SNAKE_CASE ( self : List[str]) ->Dict: '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self : str) ->Optional[Any]: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) # create random PIL images A__ = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCAmelCase__) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , Image.Image) # Test not batched input A__ = image_processor(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , ) # Test batched A__ = image_processor(UpperCAmelCase__ , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , ) def SCREAMING_SNAKE_CASE ( self : Tuple) ->List[Any]: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors A__ = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , np.ndarray) # Test not batched input A__ = image_processor(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , ) # Test batched A__ = image_processor(UpperCAmelCase__ , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , ) def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[Any]: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors A__ = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , torch.Tensor) # Test not batched input A__ = image_processor(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , ) # Test batched A__ = image_processor(UpperCAmelCase__ , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , )
87
1
import hashlib import unittest from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available from transformers.pipelines import DepthEstimationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class UpperCamelCase_ : '''simple docstring''' @staticmethod def SCREAMING_SNAKE_CASE ( *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : Any) ->Dict: '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( lowercase_ ) -> str: """simple docstring""" A__ = hashlib.mda(image.tobytes() ) return m.hexdigest() @is_pipeline_test @require_vision @require_timm @require_torch class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = MODEL_FOR_DEPTH_ESTIMATION_MAPPING def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any]) ->Optional[int]: '''simple docstring''' A__ = DepthEstimationPipeline(model=UpperCAmelCase__ , image_processor=UpperCAmelCase__) return depth_estimator, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any]) ->Tuple: '''simple docstring''' A__ = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''') self.assertEqual({'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)} , UpperCAmelCase__) import datasets A__ = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''') A__ = depth_estimator( [ Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png'''), '''http://images.cocodataset.org/val2017/000000039769.jpg''', # RGBA dataset[0]['''file'''], # LA dataset[1]['''file'''], # L dataset[2]['''file'''], ]) self.assertEqual( [ {'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)}, {'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)}, {'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)}, {'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)}, {'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)}, ] , UpperCAmelCase__ , ) @require_tf @unittest.skip('''Depth estimation is not implemented in TF''') def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[str]: '''simple docstring''' pass @slow @require_torch def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Tuple: '''simple docstring''' A__ = '''Intel/dpt-large''' A__ = pipeline('''depth-estimation''' , model=UpperCAmelCase__) A__ = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''') A__ = hashimage(outputs['''depth''']) # This seems flaky. # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977") self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item()) , 29.304) self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item()) , 2.662) @require_torch def SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[int]: '''simple docstring''' self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''')
87
from math import atan, cos, radians, sin, tan from .haversine_distance import haversine_distance _lowerCamelCase : Dict = 6_378_137.0 _lowerCamelCase : Union[str, Any] = 6_356_752.314_245 _lowerCamelCase : List[Any] = 6378137 def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> float: """simple docstring""" A__ = (AXIS_A - AXIS_B) / AXIS_A # Parametric latitudes # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude A__ = atan((1 - flattening) * tan(radians(lowercase_ ) ) ) A__ = atan((1 - flattening) * tan(radians(lowercase_ ) ) ) # Compute central angle between two points # using haversine theta. sigma = haversine_distance / equatorial radius A__ = haversine_distance(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) / EQUATORIAL_RADIUS # Intermediate P and Q values A__ = (b_lata + b_lata) / 2 A__ = (b_lata - b_lata) / 2 # Intermediate X value # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2) A__ = (sin(lowercase_ ) ** 2) * (cos(lowercase_ ) ** 2) A__ = cos(sigma / 2 ) ** 2 A__ = (sigma - sin(lowercase_ )) * (x_numerator / x_demonimator) # Intermediate Y value # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2) A__ = (cos(lowercase_ ) ** 2) * (sin(lowercase_ ) ** 2) A__ = sin(sigma / 2 ) ** 2 A__ = (sigma + sin(lowercase_ )) * (y_numerator / y_denominator) return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value))) if __name__ == "__main__": import doctest doctest.testmod()
87
1
import re def SCREAMING_SNAKE_CASE ( lowercase_ ) -> str: """simple docstring""" if len(re.findall('''[ATCG]''' , lowercase_ ) ) != len(lowercase_ ): raise ValueError('''Invalid Strand''' ) return dna.translate(dna.maketrans('''ATCG''' , '''TAGC''' ) ) if __name__ == "__main__": import doctest doctest.testmod()
87
import heapq import sys import numpy as np _lowerCamelCase : Any = tuple[int, int] class UpperCamelCase_ : '''simple docstring''' def __init__( self : Any) ->str: '''simple docstring''' A__ = [] A__ = set() def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[str]: '''simple docstring''' if not self.empty(): return self.elements[0][0] else: return float('''inf''') def SCREAMING_SNAKE_CASE ( self : Tuple) ->str: '''simple docstring''' return len(self.elements) == 0 def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any]) ->List[str]: '''simple docstring''' if item not in self.set: heapq.heappush(self.elements , (priority, item)) self.set.add(UpperCAmelCase__) else: # update # print("update", item) A__ = [] ((A__) , (A__)) = heapq.heappop(self.elements) while x != item: temp.append((pri, x)) ((A__) , (A__)) = heapq.heappop(self.elements) temp.append((priority, item)) for pro, xxx in temp: heapq.heappush(self.elements , (pro, xxx)) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : List[Any]) ->Union[str, Any]: '''simple docstring''' if item in self.set: self.set.remove(UpperCAmelCase__) A__ = [] ((A__) , (A__)) = heapq.heappop(self.elements) while x != item: temp.append((pro, x)) ((A__) , (A__)) = heapq.heappop(self.elements) for prito, yyy in temp: heapq.heappush(self.elements , (prito, yyy)) def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: '''simple docstring''' return self.elements[0][1] def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->int: '''simple docstring''' ((A__) , (A__)) = heapq.heappop(self.elements) self.set.remove(UpperCAmelCase__) return (priority, item) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str: """simple docstring""" A__ = np.array(lowercase_ ) A__ = np.array(lowercase_ ) return np.linalg.norm(a - b ) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str: """simple docstring""" return consistent_heuristic(lowercase_ , lowercase_ ) // t def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Union[str, Any]: """simple docstring""" return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] ) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Optional[int]: """simple docstring""" A__ = g_function[start] + Wa * heuristics[i](lowercase_ , lowercase_ ) return ans def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> str: """simple docstring""" A__ = np.chararray((n, n) ) for i in range(lowercase_ ): for j in range(lowercase_ ): A__ = '''*''' for i in range(lowercase_ ): for j in range(lowercase_ ): if (j, (n - 1) - i) in blocks: A__ = '''#''' A__ = '''-''' A__ = back_pointer[goal] while x != start: ((A__) , (A__)) = x # print(x) A__ = '''-''' A__ = back_pointer[x] A__ = '''-''' for i in range(lowercase_ ): for j in range(lowercase_ ): if (i, j) == (0, n - 1): print(grid[i][j] , end=''' ''' ) print('''<-- End position''' , end=''' ''' ) else: print(grid[i][j] , end=''' ''' ) print() print('''^''' ) print('''Start position''' ) print() print('''# is an obstacle''' ) print('''- is the path taken by algorithm''' ) print('''PATH TAKEN BY THE ALGORITHM IS:-''' ) A__ = back_pointer[goal] while x != start: print(lowercase_ , end=''' ''' ) A__ = back_pointer[x] print(lowercase_ ) sys.exit() def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Dict: """simple docstring""" if p[0] < 0 or p[0] > n - 1: return False if p[1] < 0 or p[1] > n - 1: return False return True def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Union[str, Any]: """simple docstring""" for itera in range(lowercase_ ): open_list[itera].remove_element(lowercase_ ) # print("s", s) # print("j", j) ((A__) , (A__)) = s A__ = (x - 1, y) A__ = (x + 1, y) A__ = (x, y + 1) A__ = (x, y - 1) for neighbours in [left, right, up, down]: if neighbours not in blocks: if valid(lowercase_ ) and neighbours not in visited: # print("neighbour", neighbours) visited.add(lowercase_ ) A__ = -1 A__ = float('''inf''' ) if valid(lowercase_ ) and g_function[neighbours] > g_function[s] + 1: A__ = g_function[s] + 1 A__ = s if neighbours not in close_list_anchor: open_list[0].put(lowercase_ , key(lowercase_ , 0 , lowercase_ , lowercase_ ) ) if neighbours not in close_list_inad: for var in range(1 , lowercase_ ): if key(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) <= Wa * key( lowercase_ , 0 , lowercase_ , lowercase_ ): open_list[j].put( lowercase_ , key(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) ) def SCREAMING_SNAKE_CASE ( ) -> Optional[int]: """simple docstring""" A__ = [] for x in range(1 , 5 ): for y in range(1 , 6 ): some_list.append((x, y) ) for x in range(15 , 20 ): some_list.append((x, 17) ) for x in range(10 , 19 ): for y in range(1 , 15 ): some_list.append((x, y) ) # L block for x in range(1 , 4 ): for y in range(12 , 19 ): some_list.append((x, y) ) for x in range(3 , 13 ): for y in range(16 , 19 ): some_list.append((x, y) ) return some_list _lowerCamelCase : Dict = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a} _lowerCamelCase : Optional[Any] = [ (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1), ] _lowerCamelCase : Optional[int] = make_common_ground() _lowerCamelCase : Optional[Any] = blocks_blk # hyper parameters _lowerCamelCase : Optional[int] = 1 _lowerCamelCase : Optional[int] = 1 _lowerCamelCase : List[Any] = 20 _lowerCamelCase : Any = 3 # one consistent and two other inconsistent # start and end destination _lowerCamelCase : str = (0, 0) _lowerCamelCase : Tuple = (n - 1, n - 1) _lowerCamelCase : int = 1 def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> str: """simple docstring""" A__ = {start: 0, goal: float('''inf''' )} A__ = {start: -1, goal: -1} A__ = [] A__ = set() for i in range(lowercase_ ): open_list.append(PriorityQueue() ) open_list[i].put(lowercase_ , key(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) ) A__ = [] A__ = [] while open_list[0].minkey() < float('''inf''' ): for i in range(1 , lowercase_ ): # print(open_list[0].minkey(), open_list[i].minkey()) if open_list[i].minkey() <= Wa * open_list[0].minkey(): global t t += 1 if g_function[goal] <= open_list[i].minkey(): if g_function[goal] < float('''inf''' ): do_something(lowercase_ , lowercase_ , lowercase_ ) else: A__ , A__ = open_list[i].top_show() visited.add(lowercase_ ) expand_state( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) close_list_inad.append(lowercase_ ) else: if g_function[goal] <= open_list[0].minkey(): if g_function[goal] < float('''inf''' ): do_something(lowercase_ , lowercase_ , lowercase_ ) else: A__ = open_list[0].top_show() visited.add(lowercase_ ) expand_state( lowercase_ , 0 , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) close_list_anchor.append(lowercase_ ) print('''No path found to goal''' ) print() for i in range(n - 1 , -1 , -1 ): for j in range(lowercase_ ): if (j, i) in blocks: print('''#''' , end=''' ''' ) elif (j, i) in back_pointer: if (j, i) == (n - 1, n - 1): print('''*''' , end=''' ''' ) else: print('''-''' , end=''' ''' ) else: print('''*''' , end=''' ''' ) if (j, i) == (n - 1, n - 1): print('''<-- End position''' , end=''' ''' ) print() print('''^''' ) print('''Start position''' ) print() print('''# is an obstacle''' ) print('''- is the path taken by algorithm''' ) if __name__ == "__main__": multi_a_star(start, goal, n_heuristic)
87
1
from ..utils import DummyObject, requires_backends class UpperCamelCase_ ( metaclass=UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = ['''flax'''] def __init__( self : int , *UpperCAmelCase__ : Any , **UpperCAmelCase__ : Optional[Any]) ->Dict: '''simple docstring''' requires_backends(self , ['''flax''']) @classmethod def SCREAMING_SNAKE_CASE ( cls : Optional[Any] , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : List[str]) ->Optional[Any]: '''simple docstring''' requires_backends(cls , ['''flax''']) @classmethod def SCREAMING_SNAKE_CASE ( cls : Any , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : List[str]) ->Union[str, Any]: '''simple docstring''' requires_backends(cls , ['''flax''']) class UpperCamelCase_ ( metaclass=UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = ['''flax'''] def __init__( self : Dict , *UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : Dict) ->Any: '''simple docstring''' requires_backends(self , ['''flax''']) @classmethod def SCREAMING_SNAKE_CASE ( cls : Dict , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : int) ->Dict: '''simple docstring''' requires_backends(cls , ['''flax''']) @classmethod def SCREAMING_SNAKE_CASE ( cls : Any , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : Optional[int]) ->Tuple: '''simple docstring''' requires_backends(cls , ['''flax''']) class UpperCamelCase_ ( metaclass=UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = ['''flax'''] def __init__( self : int , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : int) ->str: '''simple docstring''' requires_backends(self , ['''flax''']) @classmethod def SCREAMING_SNAKE_CASE ( cls : Dict , *UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : List[str]) ->str: '''simple docstring''' requires_backends(cls , ['''flax''']) @classmethod def SCREAMING_SNAKE_CASE ( cls : Dict , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : Optional[Any]) ->Dict: '''simple docstring''' requires_backends(cls , ['''flax''']) class UpperCamelCase_ ( metaclass=UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = ['''flax'''] def __init__( self : str , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : str) ->str: '''simple docstring''' requires_backends(self , ['''flax''']) @classmethod def SCREAMING_SNAKE_CASE ( cls : int , *UpperCAmelCase__ : int , **UpperCAmelCase__ : str) ->Optional[int]: '''simple docstring''' requires_backends(cls , ['''flax''']) @classmethod def SCREAMING_SNAKE_CASE ( cls : Optional[int] , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Optional[Any]) ->Any: '''simple docstring''' requires_backends(cls , ['''flax''']) class UpperCamelCase_ ( metaclass=UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = ['''flax'''] def __init__( self : Optional[int] , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : List[Any]) ->Dict: '''simple docstring''' requires_backends(self , ['''flax''']) @classmethod def SCREAMING_SNAKE_CASE ( cls : Tuple , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : Dict) ->Any: '''simple docstring''' requires_backends(cls , ['''flax''']) @classmethod def SCREAMING_SNAKE_CASE ( cls : List[str] , *UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : Optional[Any]) ->str: '''simple docstring''' requires_backends(cls , ['''flax''']) class UpperCamelCase_ ( metaclass=UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = ['''flax'''] def __init__( self : List[str] , *UpperCAmelCase__ : str , **UpperCAmelCase__ : List[Any]) ->int: '''simple docstring''' requires_backends(self , ['''flax''']) @classmethod def SCREAMING_SNAKE_CASE ( cls : List[Any] , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : List[str]) ->int: '''simple docstring''' requires_backends(cls , ['''flax''']) @classmethod def SCREAMING_SNAKE_CASE ( cls : str , *UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : List[str]) ->Dict: '''simple docstring''' requires_backends(cls , ['''flax''']) class UpperCamelCase_ ( metaclass=UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = ['''flax'''] def __init__( self : Union[str, Any] , *UpperCAmelCase__ : Any , **UpperCAmelCase__ : Any) ->Union[str, Any]: '''simple docstring''' requires_backends(self , ['''flax''']) @classmethod def SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : Tuple) ->int: '''simple docstring''' requires_backends(cls , ['''flax''']) @classmethod def SCREAMING_SNAKE_CASE ( cls : Any , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : int) ->Union[str, Any]: '''simple docstring''' requires_backends(cls , ['''flax''']) class UpperCamelCase_ ( metaclass=UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = ['''flax'''] def __init__( self : List[str] , *UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : Optional[Any]) ->Union[str, Any]: '''simple docstring''' requires_backends(self , ['''flax''']) @classmethod def SCREAMING_SNAKE_CASE ( cls : Optional[Any] , *UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : List[str]) ->str: '''simple docstring''' requires_backends(cls , ['''flax''']) @classmethod def SCREAMING_SNAKE_CASE ( cls : int , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : str) ->Any: '''simple docstring''' requires_backends(cls , ['''flax''']) class UpperCamelCase_ ( metaclass=UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = ['''flax'''] def __init__( self : int , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : List[Any]) ->Union[str, Any]: '''simple docstring''' requires_backends(self , ['''flax''']) @classmethod def SCREAMING_SNAKE_CASE ( cls : Optional[Any] , *UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : Union[str, Any]) ->Optional[int]: '''simple docstring''' requires_backends(cls , ['''flax''']) @classmethod def SCREAMING_SNAKE_CASE ( cls : int , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : str) ->List[Any]: '''simple docstring''' requires_backends(cls , ['''flax''']) class UpperCamelCase_ ( metaclass=UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = ['''flax'''] def __init__( self : List[str] , *UpperCAmelCase__ : Any , **UpperCAmelCase__ : int) ->Tuple: '''simple docstring''' requires_backends(self , ['''flax''']) @classmethod def SCREAMING_SNAKE_CASE ( cls : List[Any] , *UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : Union[str, Any]) ->List[Any]: '''simple docstring''' requires_backends(cls , ['''flax''']) @classmethod def SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : Optional[Any]) ->Union[str, Any]: '''simple docstring''' requires_backends(cls , ['''flax''']) class UpperCamelCase_ ( metaclass=UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = ['''flax'''] def __init__( self : Tuple , *UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : Optional[int]) ->str: '''simple docstring''' requires_backends(self , ['''flax''']) @classmethod def SCREAMING_SNAKE_CASE ( cls : List[str] , *UpperCAmelCase__ : Any , **UpperCAmelCase__ : Optional[int]) ->Tuple: '''simple docstring''' requires_backends(cls , ['''flax''']) @classmethod def SCREAMING_SNAKE_CASE ( cls : Any , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : Dict) ->Optional[int]: '''simple docstring''' requires_backends(cls , ['''flax''']) class UpperCamelCase_ ( metaclass=UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = ['''flax'''] def __init__( self : str , *UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : int) ->Union[str, Any]: '''simple docstring''' requires_backends(self , ['''flax''']) @classmethod def SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Dict) ->Dict: '''simple docstring''' requires_backends(cls , ['''flax''']) @classmethod def SCREAMING_SNAKE_CASE ( cls : Tuple , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Optional[Any]) ->Dict: '''simple docstring''' requires_backends(cls , ['''flax''']) class UpperCamelCase_ ( metaclass=UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = ['''flax'''] def __init__( self : List[str] , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Tuple) ->Optional[int]: '''simple docstring''' requires_backends(self , ['''flax''']) @classmethod def SCREAMING_SNAKE_CASE ( cls : Optional[int] , *UpperCAmelCase__ : Any , **UpperCAmelCase__ : int) ->Dict: '''simple docstring''' requires_backends(cls , ['''flax''']) @classmethod def SCREAMING_SNAKE_CASE ( cls : Optional[int] , *UpperCAmelCase__ : int , **UpperCAmelCase__ : Any) ->Tuple: '''simple docstring''' requires_backends(cls , ['''flax'''])
87
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.utils import ComputeEnvironment from .cluster import get_cluster_input from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401 from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401 from .sagemaker import get_sagemaker_input _lowerCamelCase : Optional[Any] = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine""" def SCREAMING_SNAKE_CASE ( ) -> Dict: """simple docstring""" A__ = _ask_options( '''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , ) if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: A__ = get_sagemaker_input() else: A__ = get_cluster_input() return config def SCREAMING_SNAKE_CASE ( lowercase_=None ) -> List[Any]: """simple docstring""" if subparsers is not None: A__ = subparsers.add_parser('''config''' , description=lowercase_ ) else: A__ = argparse.ArgumentParser('''Accelerate config command''' , description=lowercase_ ) parser.add_argument( '''--config_file''' , default=lowercase_ , help=( '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache ''' '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ''' '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ''' '''with \'huggingface\'.''' ) , ) if subparsers is not None: parser.set_defaults(func=lowercase_ ) return parser def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Any: """simple docstring""" A__ = get_user_input() if args.config_file is not None: A__ = args.config_file else: if not os.path.isdir(lowercase_ ): os.makedirs(lowercase_ ) A__ = default_yaml_config_file if config_file.endswith('''.json''' ): config.to_json_file(lowercase_ ) else: config.to_yaml_file(lowercase_ ) print(f"""accelerate configuration saved at {config_file}""" ) def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]: """simple docstring""" A__ = config_command_parser() A__ = parser.parse_args() config_command(lowercase_ ) if __name__ == "__main__": main()
87
1
import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE ( self : Dict) ->int: '''simple docstring''' A__ = 0 def SCREAMING_SNAKE_CASE ( self : Any) ->Tuple: '''simple docstring''' A__ = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''') self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[int]: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: A__ = Path(UpperCAmelCase__) / '''preprocessor_config.json''' A__ = Path(UpperCAmelCase__) / '''config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase__ , '''w''') , ) json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase__ , '''w''')) A__ = AutoImageProcessor.from_pretrained(UpperCAmelCase__) self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: A__ = Path(UpperCAmelCase__) / '''preprocessor_config.json''' A__ = Path(UpperCAmelCase__) / '''config.json''' json.dump( {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase__ , '''w''') , ) json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase__ , '''w''')) A__ = AutoImageProcessor.from_pretrained(UpperCAmelCase__) self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[Any]: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: A__ = CLIPConfig() # Create a dummy config file with image_proceesor_type A__ = Path(UpperCAmelCase__) / '''preprocessor_config.json''' A__ = Path(UpperCAmelCase__) / '''config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase__ , '''w''') , ) json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase__ , '''w''')) # remove image_processor_type to make sure config.json alone is enough to load image processor locally A__ = AutoImageProcessor.from_pretrained(UpperCAmelCase__).to_dict() config_dict.pop('''image_processor_type''') A__ = CLIPImageProcessor(**UpperCAmelCase__) # save in new folder model_config.save_pretrained(UpperCAmelCase__) config.save_pretrained(UpperCAmelCase__) A__ = AutoImageProcessor.from_pretrained(UpperCAmelCase__) # make sure private variable is not incorrectly saved A__ = json.loads(config.to_json_string()) self.assertTrue('''_processor_class''' not in dict_as_saved) self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Tuple: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: A__ = Path(UpperCAmelCase__) / '''preprocessor_config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase__ , '''w''') , ) A__ = AutoImageProcessor.from_pretrained(UpperCAmelCase__) self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : int) ->str: '''simple docstring''' with self.assertRaisesRegex( UpperCAmelCase__ , '''clip-base is not a local folder and is not a valid model identifier'''): A__ = AutoImageProcessor.from_pretrained('''clip-base''') def SCREAMING_SNAKE_CASE ( self : List[str]) ->Union[str, Any]: '''simple docstring''' with self.assertRaisesRegex( UpperCAmelCase__ , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'''): A__ = AutoImageProcessor.from_pretrained(UpperCAmelCase__ , revision='''aaaaaa''') def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Union[str, Any]: '''simple docstring''' with self.assertRaisesRegex( UpperCAmelCase__ , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ): A__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''') def SCREAMING_SNAKE_CASE ( self : List[str]) ->Tuple: '''simple docstring''' with self.assertRaises(UpperCAmelCase__): A__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''') # If remote code is disabled, we can't load this config. with self.assertRaises(UpperCAmelCase__): A__ = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase__) A__ = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase__) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''') # Test image processor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(UpperCAmelCase__) A__ = AutoImageProcessor.from_pretrained(UpperCAmelCase__ , trust_remote_code=UpperCAmelCase__) self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''') def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[str]: '''simple docstring''' try: AutoConfig.register('''custom''' , UpperCAmelCase__) AutoImageProcessor.register(UpperCAmelCase__ , UpperCAmelCase__) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(UpperCAmelCase__): AutoImageProcessor.register(UpperCAmelCase__ , UpperCAmelCase__) with tempfile.TemporaryDirectory() as tmpdirname: A__ = Path(UpperCAmelCase__) / '''preprocessor_config.json''' A__ = Path(UpperCAmelCase__) / '''config.json''' json.dump( {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase__ , '''w''') , ) json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase__ , '''w''')) A__ = CustomImageProcessor.from_pretrained(UpperCAmelCase__) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(UpperCAmelCase__) A__ = AutoImageProcessor.from_pretrained(UpperCAmelCase__) self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def SCREAMING_SNAKE_CASE ( self : Tuple) ->List[str]: '''simple docstring''' class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = True try: AutoConfig.register('''custom''' , UpperCAmelCase__) AutoImageProcessor.register(UpperCAmelCase__ , UpperCAmelCase__) # If remote code is not set, the default is to use local A__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''') self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''') self.assertTrue(image_processor.is_local) # If remote code is disabled, we load the local one. A__ = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase__) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''') self.assertTrue(image_processor.is_local) # If remote is enabled, we load from the Hub A__ = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase__) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''') self.assertTrue(not hasattr(UpperCAmelCase__ , '''is_local''')) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
87
import argparse import numpy as np import torch from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging logging.set_verbosity_info() _lowerCamelCase : int = logging.get_logger("""transformers.models.speecht5""") def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Tuple: """simple docstring""" hf_model.apply_weight_norm() A__ = checkpoint['''input_conv.weight_g'''] A__ = checkpoint['''input_conv.weight_v'''] A__ = checkpoint['''input_conv.bias'''] for i in range(len(config.upsample_rates ) ): A__ = checkpoint[f"""upsamples.{i}.1.weight_g"""] A__ = checkpoint[f"""upsamples.{i}.1.weight_v"""] A__ = checkpoint[f"""upsamples.{i}.1.bias"""] for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ): for j in range(len(config.resblock_dilation_sizes ) ): A__ = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_g"""] A__ = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_v"""] A__ = checkpoint[f"""blocks.{i}.convs1.{j}.1.bias"""] A__ = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_g"""] A__ = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_v"""] A__ = checkpoint[f"""blocks.{i}.convs2.{j}.1.bias"""] A__ = checkpoint['''output_conv.1.weight_g'''] A__ = checkpoint['''output_conv.1.weight_v'''] A__ = checkpoint['''output_conv.1.bias'''] hf_model.remove_weight_norm() @torch.no_grad() def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_=None , lowercase_=None , ) -> str: """simple docstring""" if config_path is not None: A__ = SpeechTaHifiGanConfig.from_pretrained(lowercase_ ) else: A__ = SpeechTaHifiGanConfig() A__ = SpeechTaHifiGan(lowercase_ ) A__ = torch.load(lowercase_ ) load_weights(orig_checkpoint['''model''']['''generator'''] , lowercase_ , lowercase_ ) A__ = np.load(lowercase_ ) A__ = stats[0].reshape(-1 ) A__ = stats[1].reshape(-1 ) A__ = torch.from_numpy(lowercase_ ).float() A__ = torch.from_numpy(lowercase_ ).float() model.save_pretrained(lowercase_ ) if repo_id: print('''Pushing to the hub...''' ) model.push_to_hub(lowercase_ ) if __name__ == "__main__": _lowerCamelCase : Any = argparse.ArgumentParser() parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""") parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub.""" ) _lowerCamelCase : List[str] = parser.parse_args() convert_hifigan_checkpoint( args.checkpoint_path, args.stats_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
87
1
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> int: """simple docstring""" return int((input_a, input_a).count(1 ) != 0 ) def SCREAMING_SNAKE_CASE ( ) -> None: """simple docstring""" assert or_gate(0 , 0 ) == 0 assert or_gate(0 , 1 ) == 1 assert or_gate(1 , 0 ) == 1 assert or_gate(1 , 1 ) == 1 if __name__ == "__main__": print(or_gate(0, 1)) print(or_gate(1, 0)) print(or_gate(0, 0)) print(or_gate(1, 1))
87
import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class UpperCamelCase_ : '''simple docstring''' def __init__( self : Tuple , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict=13 , UpperCAmelCase__ : Dict=7 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : str=99 , UpperCAmelCase__ : Union[str, Any]=32 , UpperCAmelCase__ : Tuple=5 , UpperCAmelCase__ : Union[str, Any]=4 , UpperCAmelCase__ : List[Any]=37 , UpperCAmelCase__ : Union[str, Any]="gelu" , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : Tuple=50 , UpperCAmelCase__ : Optional[int]=0.02 , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : List[str]=None , ) ->Union[str, Any]: '''simple docstring''' A__ = parent A__ = batch_size A__ = seq_length A__ = is_training A__ = use_input_mask A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = initializer_range A__ = use_labels A__ = scope def SCREAMING_SNAKE_CASE ( self : int) ->Any: '''simple docstring''' A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) A__ = None if self.use_input_mask: A__ = random_attention_mask([self.batch_size, self.seq_length]) if self.use_labels: A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) A__ = self.get_config() return config, input_ids, input_mask, token_labels def SCREAMING_SNAKE_CASE ( self : int) ->int: '''simple docstring''' return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE ( self : List[str]) ->Union[str, Any]: '''simple docstring''' ( ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ) = self.prepare_config_and_inputs() A__ = True A__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) A__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : List[Any] , ) ->Dict: '''simple docstring''' A__ = BertGenerationEncoder(config=UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() A__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__) A__ = model(UpperCAmelCase__) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , **UpperCAmelCase__ : Optional[Any] , ) ->Dict: '''simple docstring''' A__ = True A__ = BertGenerationEncoder(config=UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() A__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , ) A__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict , **UpperCAmelCase__ : Optional[int] , ) ->Any: '''simple docstring''' A__ = True A__ = True A__ = BertGenerationDecoder(config=UpperCAmelCase__).to(UpperCAmelCase__).eval() # first forward pass A__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ , ) A__ = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A__ = ids_tensor((self.batch_size, 3) , config.vocab_size) A__ = ids_tensor((self.batch_size, 3) , vocab_size=2) # append to next input_ids and A__ = torch.cat([input_ids, next_tokens] , dim=-1) A__ = torch.cat([input_mask, next_mask] , dim=-1) A__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )['''hidden_states'''][0] A__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )['''hidden_states'''][0] # select random slice A__ = ids_tensor((1,) , output_from_past.shape[-1]).item() A__ = output_from_no_past[:, -3:, random_slice_idx].detach() A__ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3)) def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] , *UpperCAmelCase__ : List[str] , ) ->List[Any]: '''simple docstring''' A__ = BertGenerationDecoder(UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() A__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[str]: '''simple docstring''' A__ , A__ , A__ , A__ = self.prepare_config_and_inputs() A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () UpperCAmelCase__ = (BertGenerationDecoder,) if is_torch_available() else () UpperCAmelCase__ = ( {'''feature-extraction''': BertGenerationEncoder, '''text-generation''': BertGenerationDecoder} if is_torch_available() else {} ) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Dict: '''simple docstring''' A__ = BertGenerationEncoderTester(self) A__ = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[str]: '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : List[Any]) ->int: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[Any]: '''simple docstring''' A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs() A__ = '''bert''' self.model_tester.create_and_check_model(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Dict) ->Union[str, Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any: '''simple docstring''' ( ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() A__ = None self.model_tester.create_and_check_model_as_decoder( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , ) def SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*UpperCAmelCase__) @slow def SCREAMING_SNAKE_CASE ( self : Dict) ->List[Any]: '''simple docstring''' A__ = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''') self.assertIsNotNone(UpperCAmelCase__) @require_torch class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE ( self : Any) ->Union[str, Any]: '''simple docstring''' A__ = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''') A__ = torch.tensor([[101, 7_592, 1_010, 2_026, 3_899, 2_003, 10_140, 102]]) with torch.no_grad(): A__ = model(UpperCAmelCase__)[0] A__ = torch.Size([1, 8, 1_024]) self.assertEqual(output.shape , UpperCAmelCase__) A__ = torch.tensor( [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1e-4)) @require_torch class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]: '''simple docstring''' A__ = BertGenerationDecoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''') A__ = torch.tensor([[101, 7_592, 1_010, 2_026, 3_899, 2_003, 10_140, 102]]) with torch.no_grad(): A__ = model(UpperCAmelCase__)[0] A__ = torch.Size([1, 8, 50_358]) self.assertEqual(output.shape , UpperCAmelCase__) A__ = torch.tensor( [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1e-4))
87
1
from __future__ import annotations _lowerCamelCase : Tuple = tuple[int, int, int] _lowerCamelCase : Union[str, Any] = tuple[str, str, str] # used alphabet -------------------------- # from string.ascii_uppercase _lowerCamelCase : List[Any] = """ABCDEFGHIJKLMNOPQRSTUVWXYZ""" # -------------------------- default selection -------------------------- # rotors -------------------------- _lowerCamelCase : str = """EGZWVONAHDCLFQMSIPJBYUKXTR""" _lowerCamelCase : str = """FOBHMDKEXQNRAULPGSJVTYICZW""" _lowerCamelCase : List[Any] = """ZJXESIUQLHAVRMDOYGTNFWPBKC""" # reflector -------------------------- _lowerCamelCase : Union[str, Any] = { """A""": """N""", """N""": """A""", """B""": """O""", """O""": """B""", """C""": """P""", """P""": """C""", """D""": """Q""", """Q""": """D""", """E""": """R""", """R""": """E""", """F""": """S""", """S""": """F""", """G""": """T""", """T""": """G""", """H""": """U""", """U""": """H""", """I""": """V""", """V""": """I""", """J""": """W""", """W""": """J""", """K""": """X""", """X""": """K""", """L""": """Y""", """Y""": """L""", """M""": """Z""", """Z""": """M""", } # -------------------------- extra rotors -------------------------- _lowerCamelCase : Tuple = """RMDJXFUWGISLHVTCQNKYPBEZOA""" _lowerCamelCase : Any = """SGLCPQWZHKXAREONTFBVIYJUDM""" _lowerCamelCase : Optional[Any] = """HVSICLTYKQUBXDWAJZOMFGPREN""" _lowerCamelCase : Dict = """RZWQHFMVDBKICJLNTUXAGYPSOE""" _lowerCamelCase : List[str] = """LFKIJODBEGAMQPXVUHYSTCZRWN""" _lowerCamelCase : Optional[int] = """KOAEGVDHXPQZMLFTYWJNBRCIUS""" def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]: """simple docstring""" if (unique_rotsel := len(set(lowercase_ ) )) < 3: A__ = f"""Please use 3 unique rotors (not {unique_rotsel})""" raise Exception(lowercase_ ) # Checks if rotor positions are valid A__ , A__ , A__ = rotpos if not 0 < rotorposa <= len(lowercase_ ): A__ = f"""First rotor position is not within range of 1..26 ({rotorposa}""" raise ValueError(lowercase_ ) if not 0 < rotorposa <= len(lowercase_ ): A__ = f"""Second rotor position is not within range of 1..26 ({rotorposa})""" raise ValueError(lowercase_ ) if not 0 < rotorposa <= len(lowercase_ ): A__ = f"""Third rotor position is not within range of 1..26 ({rotorposa})""" raise ValueError(lowercase_ ) # Validates string and returns dict A__ = _plugboard(lowercase_ ) return rotpos, rotsel, pbdict def SCREAMING_SNAKE_CASE ( lowercase_ ) -> dict[str, str]: """simple docstring""" if not isinstance(lowercase_ , lowercase_ ): A__ = f"""Plugboard setting isn't type string ({type(lowercase_ )})""" raise TypeError(lowercase_ ) elif len(lowercase_ ) % 2 != 0: A__ = f"""Odd number of symbols ({len(lowercase_ )})""" raise Exception(lowercase_ ) elif pbstring == "": return {} pbstring.replace(''' ''' , '''''' ) # Checks if all characters are unique A__ = set() for i in pbstring: if i not in abc: A__ = f"""'{i}' not in list of symbols""" raise Exception(lowercase_ ) elif i in tmppbl: A__ = f"""Duplicate symbol ({i})""" raise Exception(lowercase_ ) else: tmppbl.add(lowercase_ ) del tmppbl # Created the dictionary A__ = {} for j in range(0 , len(lowercase_ ) - 1 , 2 ): A__ = pbstring[j + 1] A__ = pbstring[j] return pb def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ = (rotora, rotora, rotora) , lowercase_ = "" , ) -> str: """simple docstring""" A__ = text.upper() A__ , A__ , A__ = _validator( lowercase_ , lowercase_ , plugb.upper() ) A__ , A__ , A__ = rotor_position A__ , A__ , A__ = rotor_selection rotorposa -= 1 rotorposa -= 1 rotorposa -= 1 A__ = [] # encryption/decryption process -------------------------- for symbol in text: if symbol in abc: # 1st plugboard -------------------------- if symbol in plugboard: A__ = plugboard[symbol] # rotor ra -------------------------- A__ = abc.index(lowercase_ ) + rotorposa A__ = rotora[index % len(lowercase_ )] # rotor rb -------------------------- A__ = abc.index(lowercase_ ) + rotorposa A__ = rotora[index % len(lowercase_ )] # rotor rc -------------------------- A__ = abc.index(lowercase_ ) + rotorposa A__ = rotora[index % len(lowercase_ )] # reflector -------------------------- # this is the reason you don't need another machine to decipher A__ = reflector[symbol] # 2nd rotors A__ = abc[rotora.index(lowercase_ ) - rotorposa] A__ = abc[rotora.index(lowercase_ ) - rotorposa] A__ = abc[rotora.index(lowercase_ ) - rotorposa] # 2nd plugboard if symbol in plugboard: A__ = plugboard[symbol] # moves/resets rotor positions rotorposa += 1 if rotorposa >= len(lowercase_ ): A__ = 0 rotorposa += 1 if rotorposa >= len(lowercase_ ): A__ = 0 rotorposa += 1 if rotorposa >= len(lowercase_ ): A__ = 0 # else: # pass # Error could be also raised # raise ValueError( # 'Invalid symbol('+repr(symbol)+')') result.append(lowercase_ ) return "".join(lowercase_ ) if __name__ == "__main__": _lowerCamelCase : Union[str, Any] = """This is my Python script that emulates the Enigma machine from WWII.""" _lowerCamelCase : str = (1, 1, 1) _lowerCamelCase : Optional[Any] = """pictures""" _lowerCamelCase : List[str] = (rotora, rotora, rotora) _lowerCamelCase : Any = enigma(message, rotor_pos, rotor_sel, pb) print("""Encrypted message:""", en) print("""Decrypted message:""", enigma(en, rotor_pos, rotor_sel, pb))
87
import argparse import json import os import time import zipfile from get_ci_error_statistics import download_artifact, get_artifacts_links from transformers import logging _lowerCamelCase : int = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Dict: """simple docstring""" A__ = set() A__ = [] def parse_line(lowercase_ ): for line in fp: if isinstance(lowercase_ , lowercase_ ): A__ = line.decode('''UTF-8''' ) if "warnings summary (final)" in line: continue # This means we are outside the body of a warning elif not line.startswith(''' ''' ): # process a single warning and move it to `selected_warnings`. if len(lowercase_ ) > 0: A__ = '''\n'''.join(lowercase_ ) # Only keep the warnings specified in `targets` if any(f""": {x}: """ in warning for x in targets ): selected_warnings.add(lowercase_ ) buffer.clear() continue else: A__ = line.strip() buffer.append(lowercase_ ) if from_gh: for filename in os.listdir(lowercase_ ): A__ = os.path.join(lowercase_ , lowercase_ ) if not os.path.isdir(lowercase_ ): # read the file if filename != "warnings.txt": continue with open(lowercase_ ) as fp: parse_line(lowercase_ ) else: try: with zipfile.ZipFile(lowercase_ ) as z: for filename in z.namelist(): if not os.path.isdir(lowercase_ ): # read the file if filename != "warnings.txt": continue with z.open(lowercase_ ) as fp: parse_line(lowercase_ ) except Exception: logger.warning( f"""{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.""" ) return selected_warnings def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str: """simple docstring""" A__ = set() A__ = [os.path.join(lowercase_ , lowercase_ ) for p in os.listdir(lowercase_ ) if (p.endswith('''.zip''' ) or from_gh)] for p in paths: selected_warnings.update(extract_warnings_from_single_artifact(lowercase_ , lowercase_ ) ) return selected_warnings if __name__ == "__main__": def SCREAMING_SNAKE_CASE ( lowercase_ ) -> int: """simple docstring""" return values.split(''',''' ) _lowerCamelCase : int = argparse.ArgumentParser() # Required parameters parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""") parser.add_argument( """--output_dir""", type=str, required=True, help="""Where to store the downloaded artifacts and other result files.""", ) parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""") # optional parameters parser.add_argument( """--targets""", default="""DeprecationWarning,UserWarning,FutureWarning""", type=list_str, help="""Comma-separated list of target warning(s) which we want to extract.""", ) parser.add_argument( """--from_gh""", action="""store_true""", help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""", ) _lowerCamelCase : List[Any] = parser.parse_args() _lowerCamelCase : List[str] = args.from_gh if from_gh: # The artifacts have to be downloaded using `actions/download-artifact@v3` pass else: os.makedirs(args.output_dir, exist_ok=True) # get download links _lowerCamelCase : Any = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) # download artifacts for idx, (name, url) in enumerate(artifacts.items()): print(name) print(url) print("""=""" * 80) download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) # extract warnings from artifacts _lowerCamelCase : Any = extract_warnings(args.output_dir, args.targets) _lowerCamelCase : Optional[Any] = sorted(selected_warnings) with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
87
1
import argparse import os from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_task_guides.py _lowerCamelCase : Dict = """src/transformers""" _lowerCamelCase : Tuple = """docs/source/en/tasks""" def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> int: """simple docstring""" with open(lowercase_ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: A__ = f.readlines() # Find the start prompt. A__ = 0 while not lines[start_index].startswith(lowercase_ ): start_index += 1 start_index += 1 A__ = start_index while not lines[end_index].startswith(lowercase_ ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # This is to make sure the transformers module imported is the one in the repo. _lowerCamelCase : Union[str, Any] = direct_transformers_import(TRANSFORMERS_PATH) _lowerCamelCase : List[Any] = { """asr.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES, """audio_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, """language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, """image_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, """masked_language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES, """multiple_choice.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, """object_detection.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, """question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, """semantic_segmentation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, """sequence_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, """summarization.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, """token_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, """translation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, """video_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES, """document_question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, """monocular_depth_estimation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES, } # This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any # `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`). _lowerCamelCase : List[str] = { """summarization.md""": ("""nllb""",), """translation.md""": ("""nllb""",), } def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Dict: """simple docstring""" A__ = TASK_GUIDE_TO_MODELS[task_guide] A__ = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(lowercase_ , set() ) A__ = { code: name for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if (code in model_maping_names or code in special_model_types) } return ", ".join([f"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n" def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=False ) -> Any: """simple docstring""" A__ , A__ , A__ , A__ = _find_text_in_file( filename=os.path.join(lowercase_ , lowercase_ ) , start_prompt='''<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->''' , end_prompt='''<!--End of the generated tip-->''' , ) A__ = get_model_list_for_task(lowercase_ ) if current_list != new_list: if overwrite: with open(os.path.join(lowercase_ , lowercase_ ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(lines[:start_index] + [new_list] + lines[end_index:] ) else: raise ValueError( f"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`""" ''' to fix this.''' ) if __name__ == "__main__": _lowerCamelCase : List[Any] = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") _lowerCamelCase : Optional[Any] = parser.parse_args() for task_guide in TASK_GUIDE_TO_MODELS.keys(): check_model_list_for_task(task_guide, args.fix_and_overwrite)
87
class UpperCamelCase_ : # Public class to implement a graph '''simple docstring''' def __init__( self : str , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : list[list[bool]]) ->None: '''simple docstring''' A__ = row A__ = col A__ = graph def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : list[list[bool]]) ->bool: '''simple docstring''' return ( 0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and self.graph[i][j] ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : list[list[bool]]) ->None: '''simple docstring''' A__ = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order A__ = [-1, 0, 1, -1, 1, -1, 0, 1] A__ = True # Make those cells visited for k in range(8): if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , UpperCAmelCase__): self.diffs(i + row_nbr[k] , j + col_nbr[k] , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->int: # And finally, count all islands. '''simple docstring''' A__ = [[False for j in range(self.COL)] for i in range(self.ROW)] A__ = 0 for i in range(self.ROW): for j in range(self.COL): if visited[i][j] is False and self.graph[i][j] == 1: self.diffs(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__) count += 1 return count
87
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) if is_sentencepiece_available(): from ..ta.tokenization_ta import TaTokenizer else: from ...utils.dummy_sentencepiece_objects import TaTokenizer _lowerCamelCase : List[str] = TaTokenizer if is_tokenizers_available(): from ..ta.tokenization_ta_fast import TaTokenizerFast else: from ...utils.dummy_tokenizers_objects import TaTokenizerFast _lowerCamelCase : List[str] = TaTokenizerFast _lowerCamelCase : List[str] = {"""configuration_mt5""": ["""MT5Config""", """MT5OnnxConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Optional[int] = [ """MT5EncoderModel""", """MT5ForConditionalGeneration""", """MT5ForQuestionAnswering""", """MT5Model""", """MT5PreTrainedModel""", """MT5Stack""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Union[str, Any] = ["""TFMT5EncoderModel""", """TFMT5ForConditionalGeneration""", """TFMT5Model"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : List[Any] = ["""FlaxMT5EncoderModel""", """FlaxMT5ForConditionalGeneration""", """FlaxMT5Model"""] if TYPE_CHECKING: from .configuration_mta import MTaConfig, MTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mta import ( MTaEncoderModel, MTaForConditionalGeneration, MTaForQuestionAnswering, MTaModel, MTaPreTrainedModel, MTaStack, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel else: import sys _lowerCamelCase : List[Any] = _LazyModule( __name__, globals()["""__file__"""], _import_structure, extra_objects={"""MT5Tokenizer""": MTaTokenizer, """MT5TokenizerFast""": MTaTokenizerFast}, module_spec=__spec__, )
87
from __future__ import annotations import requests _lowerCamelCase : str = set( """approved_at_utc approved_by author_flair_background_color author_flair_css_class author_flair_richtext author_flair_template_id author_fullname author_premium can_mod_post category clicked content_categories created_utc downs edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta is_original_content is_reddit_media_domain is_video link_flair_css_class link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title name permalink pwls quarantine saved score secure_media secure_media_embed selftext subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type total_awards_received ups upvote_ratio url user_reports""".split() ) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ = 1 , lowercase_ = "new" , lowercase_ = None ) -> dict: """simple docstring""" A__ = wanted_data or [] if invalid_search_terms := ", ".join(sorted(set(lowercase_ ) - valid_terms ) ): A__ = f"""Invalid search term: {invalid_search_terms}""" raise ValueError(lowercase_ ) A__ = requests.get( f"""https://reddit.com/r/{subreddit}/{age}.json?limit={limit}""" , headers={'''User-agent''': '''A random string'''} , ) if response.status_code == 429: raise requests.HTTPError A__ = response.json() if not wanted_data: return {id_: data["data"]["children"][id_] for id_ in range(lowercase_ )} A__ = {} for id_ in range(lowercase_ ): A__ = { item: data['''data''']['''children'''][id_]['''data'''][item] for item in wanted_data } return data_dict if __name__ == "__main__": # If you get Error 429, that means you are rate limited.Try after some time print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
87
1
import argparse import json import os import time import zipfile from get_ci_error_statistics import download_artifact, get_artifacts_links from transformers import logging _lowerCamelCase : int = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Dict: """simple docstring""" A__ = set() A__ = [] def parse_line(lowercase_ ): for line in fp: if isinstance(lowercase_ , lowercase_ ): A__ = line.decode('''UTF-8''' ) if "warnings summary (final)" in line: continue # This means we are outside the body of a warning elif not line.startswith(''' ''' ): # process a single warning and move it to `selected_warnings`. if len(lowercase_ ) > 0: A__ = '''\n'''.join(lowercase_ ) # Only keep the warnings specified in `targets` if any(f""": {x}: """ in warning for x in targets ): selected_warnings.add(lowercase_ ) buffer.clear() continue else: A__ = line.strip() buffer.append(lowercase_ ) if from_gh: for filename in os.listdir(lowercase_ ): A__ = os.path.join(lowercase_ , lowercase_ ) if not os.path.isdir(lowercase_ ): # read the file if filename != "warnings.txt": continue with open(lowercase_ ) as fp: parse_line(lowercase_ ) else: try: with zipfile.ZipFile(lowercase_ ) as z: for filename in z.namelist(): if not os.path.isdir(lowercase_ ): # read the file if filename != "warnings.txt": continue with z.open(lowercase_ ) as fp: parse_line(lowercase_ ) except Exception: logger.warning( f"""{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.""" ) return selected_warnings def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str: """simple docstring""" A__ = set() A__ = [os.path.join(lowercase_ , lowercase_ ) for p in os.listdir(lowercase_ ) if (p.endswith('''.zip''' ) or from_gh)] for p in paths: selected_warnings.update(extract_warnings_from_single_artifact(lowercase_ , lowercase_ ) ) return selected_warnings if __name__ == "__main__": def SCREAMING_SNAKE_CASE ( lowercase_ ) -> int: """simple docstring""" return values.split(''',''' ) _lowerCamelCase : int = argparse.ArgumentParser() # Required parameters parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""") parser.add_argument( """--output_dir""", type=str, required=True, help="""Where to store the downloaded artifacts and other result files.""", ) parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""") # optional parameters parser.add_argument( """--targets""", default="""DeprecationWarning,UserWarning,FutureWarning""", type=list_str, help="""Comma-separated list of target warning(s) which we want to extract.""", ) parser.add_argument( """--from_gh""", action="""store_true""", help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""", ) _lowerCamelCase : List[Any] = parser.parse_args() _lowerCamelCase : List[str] = args.from_gh if from_gh: # The artifacts have to be downloaded using `actions/download-artifact@v3` pass else: os.makedirs(args.output_dir, exist_ok=True) # get download links _lowerCamelCase : Any = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) # download artifacts for idx, (name, url) in enumerate(artifacts.items()): print(name) print(url) print("""=""" * 80) download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) # extract warnings from artifacts _lowerCamelCase : Any = extract_warnings(args.output_dir, args.targets) _lowerCamelCase : Optional[Any] = sorted(selected_warnings) with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
87
import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = JukeboxTokenizer UpperCAmelCase__ = { '''artist''': '''Zac Brown Band''', '''genres''': '''Country''', '''lyrics''': '''I met a traveller from an antique land, Who said "Two vast and trunkless legs of stone Stand in the desert. . . . Near them, on the sand, Half sunk a shattered visage lies, whose frown, And wrinkled lip, and sneer of cold command, Tell that its sculptor well those passions read Which yet survive, stamped on these lifeless things, The hand that mocked them, and the heart that fed; And on the pedestal, these words appear: My name is Ozymandias, King of Kings; Look on my Works, ye Mighty, and despair! Nothing beside remains. Round the decay Of that colossal Wreck, boundless and bare The lone and level sands stretch far away ''', } @require_torch def SCREAMING_SNAKE_CASE ( self : Dict) ->int: '''simple docstring''' import torch A__ = JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''') A__ = tokenizer(**self.metas)['''input_ids'''] # fmt: off A__ = [ torch.tensor([[ 0, 0, 0, 7_169, 507, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67, 78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]]), torch.tensor([[0, 0, 0, 1_069, 11]]), torch.tensor([[0, 0, 0, 1_069, 11]]), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0])) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1])) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2])) @require_torch def SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[int]: '''simple docstring''' import torch A__ = JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''') A__ = tokenizer(**self.metas)['''input_ids'''] # fmt: off A__ = [ torch.tensor([[ 0, 0, 0, 1_069, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]]), torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]]), torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]]), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0])) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1])) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2]))
87
1
import argparse from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird from transformers.utils import logging logging.set_verbosity_info() def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Union[str, Any]: """simple docstring""" A__ = BigBirdConfig.from_json_file(lowercase_ ) print(f"""Building PyTorch model from configuration: {config}""" ) if is_trivia_qa: A__ = BigBirdForQuestionAnswering(lowercase_ ) else: A__ = BigBirdForPreTraining(lowercase_ ) # Load weights from tf checkpoint load_tf_weights_in_big_bird(lowercase_ , lowercase_ , is_trivia_qa=lowercase_ ) # Save pytorch-model print(f"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(lowercase_ ) if __name__ == "__main__": _lowerCamelCase : int = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--big_bird_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained BERT model. \n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--is_trivia_qa""", action="""store_true""", help="""Whether to convert a model with a trivia_qa head.""" ) _lowerCamelCase : Optional[Any] = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa )
87
from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCamelCase : str = logging.get_logger(__name__) _lowerCamelCase : List[str] = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""} class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''openai-gpt''' UpperCAmelCase__ = { '''max_position_embeddings''': '''n_positions''', '''hidden_size''': '''n_embd''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : Union[str, Any] , UpperCAmelCase__ : Dict=40_478 , UpperCAmelCase__ : str=512 , UpperCAmelCase__ : Union[str, Any]=768 , UpperCAmelCase__ : Optional[Any]=12 , UpperCAmelCase__ : Any=12 , UpperCAmelCase__ : Optional[Any]="gelu" , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : List[str]=1e-5 , UpperCAmelCase__ : int=0.02 , UpperCAmelCase__ : Any="cls_index" , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Optional[Any]=0.1 , **UpperCAmelCase__ : Dict , ) ->Any: '''simple docstring''' A__ = vocab_size A__ = n_positions A__ = n_embd A__ = n_layer A__ = n_head A__ = afn A__ = resid_pdrop A__ = embd_pdrop A__ = attn_pdrop A__ = layer_norm_epsilon A__ = initializer_range A__ = summary_type A__ = summary_use_proj A__ = summary_activation A__ = summary_first_dropout A__ = summary_proj_to_labels super().__init__(**UpperCAmelCase__)
87
1
class UpperCamelCase_ : '''simple docstring''' def __init__( self : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Union[str, Any]) ->str: '''simple docstring''' A__ = name A__ = value A__ = weight def __repr__( self : Any) ->Optional[Any]: '''simple docstring''' return f"""{self.__class__.__name__}({self.name}, {self.value}, {self.weight})""" def SCREAMING_SNAKE_CASE ( self : int) ->int: '''simple docstring''' return self.value def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str: '''simple docstring''' return self.name def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[int]: '''simple docstring''' return self.weight def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[int]: '''simple docstring''' return self.value / self.weight def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> str: """simple docstring""" A__ = [] for i in range(len(lowercase_ ) ): menu.append(Things(name[i] , value[i] , weight[i] ) ) return menu def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> str: """simple docstring""" A__ = sorted(lowercase_ , key=lowercase_ , reverse=lowercase_ ) A__ = [] A__ , A__ = 0.0, 0.0 for i in range(len(lowercase_ ) ): if (total_cost + items_copy[i].get_weight()) <= max_cost: result.append(items_copy[i] ) total_cost += items_copy[i].get_weight() total_value += items_copy[i].get_value() return (result, total_value) def SCREAMING_SNAKE_CASE ( ) -> Any: """simple docstring""" if __name__ == "__main__": import doctest doctest.testmod()
87
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> int: """simple docstring""" return int((input_a, input_a).count(1 ) != 0 ) def SCREAMING_SNAKE_CASE ( ) -> None: """simple docstring""" assert or_gate(0 , 0 ) == 0 assert or_gate(0 , 1 ) == 1 assert or_gate(1 , 0 ) == 1 assert or_gate(1 , 1 ) == 1 if __name__ == "__main__": print(or_gate(0, 1)) print(or_gate(1, 0)) print(or_gate(0, 0)) print(or_gate(1, 1))
87
1
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_albert import AlbertTokenizer else: _lowerCamelCase : List[str] = None _lowerCamelCase : str = logging.get_logger(__name__) _lowerCamelCase : Tuple = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} _lowerCamelCase : Any = { """vocab_file""": { """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""", """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""", """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""", """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""", """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""", """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""", """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""", """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""", }, """tokenizer_file""": { """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json""", """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json""", """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json""", """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json""", """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json""", """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json""", """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json""", """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json""", }, } _lowerCamelCase : Any = { """albert-base-v1""": 512, """albert-large-v1""": 512, """albert-xlarge-v1""": 512, """albert-xxlarge-v1""": 512, """albert-base-v2""": 512, """albert-large-v2""": 512, """albert-xlarge-v2""": 512, """albert-xxlarge-v2""": 512, } _lowerCamelCase : Any = """▁""" class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = VOCAB_FILES_NAMES UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ = AlbertTokenizer def __init__( self : int , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : List[Any]=False , UpperCAmelCase__ : Optional[Any]="[CLS]" , UpperCAmelCase__ : str="[SEP]" , UpperCAmelCase__ : int="<unk>" , UpperCAmelCase__ : Union[str, Any]="[SEP]" , UpperCAmelCase__ : Dict="<pad>" , UpperCAmelCase__ : List[Any]="[CLS]" , UpperCAmelCase__ : str="[MASK]" , **UpperCAmelCase__ : int , ) ->str: '''simple docstring''' A__ = ( AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ , normalized=UpperCAmelCase__) if isinstance(UpperCAmelCase__ , UpperCAmelCase__) else mask_token ) super().__init__( UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , do_lower_case=UpperCAmelCase__ , remove_space=UpperCAmelCase__ , keep_accents=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , **UpperCAmelCase__ , ) A__ = do_lower_case A__ = remove_space A__ = keep_accents A__ = vocab_file A__ = False if not self.vocab_file else True def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None) ->List[int]: '''simple docstring''' A__ = [self.sep_token_id] A__ = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None) ->List[int]: '''simple docstring''' A__ = [self.sep_token_id] A__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None) ->Tuple[str]: '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''') if not os.path.isdir(UpperCAmelCase__): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""") return A__ = os.path.join( UpperCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']) if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCAmelCase__): copyfile(self.vocab_file , UpperCAmelCase__) return (out_vocab_file,)
87
import importlib.metadata import warnings from copy import deepcopy from packaging import version from ..utils import logging from .import_utils import is_accelerate_available, is_bitsandbytes_available if is_bitsandbytes_available(): import bitsandbytes as bnb import torch import torch.nn as nn from ..pytorch_utils import ConvaD if is_accelerate_available(): from accelerate import init_empty_weights from accelerate.utils import find_tied_parameters _lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_=None , lowercase_=None ) -> Dict: """simple docstring""" if "." in tensor_name: A__ = tensor_name.split('''.''' ) for split in splits[:-1]: A__ = getattr(lowercase_ , lowercase_ ) if new_module is None: raise ValueError(f"""{module} has no attribute {split}.""" ) A__ = new_module A__ = splits[-1] if tensor_name not in module._parameters and tensor_name not in module._buffers: raise ValueError(f"""{module} does not have a parameter or a buffer named {tensor_name}.""" ) A__ = tensor_name in module._buffers A__ = getattr(lowercase_ , lowercase_ ) if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None: raise ValueError(f"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" ) A__ = False A__ = False if is_buffer or not is_bitsandbytes_available(): A__ = False A__ = False else: A__ = hasattr(bnb.nn , '''Params4bit''' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit ) A__ = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams ) if is_abit or is_abit: A__ = module._parameters[tensor_name] if param.device.type != "cuda": if value is None: A__ = old_value.to(lowercase_ ) elif isinstance(lowercase_ , torch.Tensor ): A__ = value.to('''cpu''' ) if value.dtype == torch.inta: A__ = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse( '''0.37.2''' ) if not is_abit_serializable: raise ValueError( '''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. ''' '''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' ) else: A__ = torch.tensor(lowercase_ , device='''cpu''' ) # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. if issubclass(module.source_cls , lowercase_ ) and fpaa_statistics is None: A__ = new_value.T A__ = old_value.__dict__ if is_abit: A__ = bnb.nn.IntaParams(lowercase_ , requires_grad=lowercase_ , **lowercase_ ).to(lowercase_ ) elif is_abit: A__ = bnb.nn.Paramsabit(lowercase_ , requires_grad=lowercase_ , **lowercase_ ).to(lowercase_ ) A__ = new_value if fpaa_statistics is not None: setattr(module.weight , '''SCB''' , fpaa_statistics.to(lowercase_ ) ) else: if value is None: A__ = old_value.to(lowercase_ ) elif isinstance(lowercase_ , torch.Tensor ): A__ = value.to(lowercase_ ) else: A__ = torch.tensor(lowercase_ , device=lowercase_ ) if is_buffer: A__ = new_value else: A__ = nn.Parameter(lowercase_ , requires_grad=old_value.requires_grad ) A__ = new_value def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=False ) -> Dict: """simple docstring""" for name, module in model.named_children(): if current_key_name is None: A__ = [] current_key_name.append(lowercase_ ) if (isinstance(lowercase_ , nn.Linear ) or isinstance(lowercase_ , lowercase_ )) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` if not any(key in '''.'''.join(lowercase_ ) for key in modules_to_not_convert ): with init_empty_weights(): if isinstance(lowercase_ , lowercase_ ): A__ , A__ = module.weight.shape else: A__ = module.in_features A__ = module.out_features if quantization_config.quantization_method() == "llm_int8": A__ = bnb.nn.LinearabitLt( lowercase_ , lowercase_ , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , ) A__ = True else: if ( quantization_config.llm_inta_skip_modules is not None and name in quantization_config.llm_inta_skip_modules ): pass else: A__ = bnb.nn.Linearabit( lowercase_ , lowercase_ , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , ) A__ = True # Store the module class in case we need to transpose the weight later A__ = type(lowercase_ ) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(lowercase_ ) if len(list(module.children() ) ) > 0: A__ , A__ = _replace_with_bnb_linear( lowercase_ , lowercase_ , lowercase_ , lowercase_ , has_been_replaced=lowercase_ , ) # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None ) -> Tuple: """simple docstring""" A__ = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert A__ , A__ = _replace_with_bnb_linear( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) if not has_been_replaced: logger.warning( '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.''' ''' Please double check your model architecture, or submit an issue on github if you think this is''' ''' a bug.''' ) return model def SCREAMING_SNAKE_CASE ( *lowercase_ , **lowercase_ ) -> Dict: """simple docstring""" warnings.warn( '''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' , lowercase_ , ) return replace_with_bnb_linear(*lowercase_ , **lowercase_ ) def SCREAMING_SNAKE_CASE ( *lowercase_ , **lowercase_ ) -> Optional[Any]: """simple docstring""" warnings.warn( '''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' , lowercase_ , ) return set_module_quantized_tensor_to_device(*lowercase_ , **lowercase_ ) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]: """simple docstring""" A__ = deepcopy(lowercase_ ) # this has 0 cost since it is done inside `init_empty_weights` context manager` tied_model.tie_weights() A__ = find_tied_parameters(lowercase_ ) # For compatibility with Accelerate < 0.18 if isinstance(lowercase_ , lowercase_ ): A__ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: A__ = sum(lowercase_ , [] ) A__ = len(lowercase_ ) > 0 # Check if it is a base model A__ = not hasattr(lowercase_ , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head A__ = list(model.named_children() ) A__ = [list_modules[-1][0]] # add last module together with tied weights A__ = set(lowercase_ ) - set(lowercase_ ) A__ = list(set(lowercase_ ) ) + list(lowercase_ ) # remove ".weight" from the keys A__ = ['''.weight''', '''.bias'''] A__ = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: A__ = name.replace(lowercase_ , '''''' ) filtered_module_names.append(lowercase_ ) return filtered_module_names
87
1
import sys import webbrowser import requests from bsa import BeautifulSoup from fake_useragent import UserAgent if __name__ == "__main__": print("""Googling.....""") _lowerCamelCase : List[Any] = """https://www.google.com/search?q=""" + """ """.join(sys.argv[1:]) _lowerCamelCase : Union[str, Any] = requests.get(url, headers={"""UserAgent""": UserAgent().random}) # res.raise_for_status() with open("""project1a.html""", """wb""") as out_file: # only for knowing the class for data in res.iter_content(10000): out_file.write(data) _lowerCamelCase : Union[str, Any] = BeautifulSoup(res.text, """html.parser""") _lowerCamelCase : Any = list(soup.select(""".eZt8xd"""))[:5] print(len(links)) for link in links: if link.text == "Maps": webbrowser.open(link.get("""href""")) else: webbrowser.open(F'''https://google.com{link.get('href')}''')
87
from math import sqrt import numpy as np from sympy import symbols # Coefficient # Speed of light (m/s) _lowerCamelCase : str = 299792458 # Symbols _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = symbols("""ct x y z""") def SCREAMING_SNAKE_CASE ( lowercase_ ) -> float: """simple docstring""" if velocity > c: raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''' ) elif velocity < 1: # Usually the speed should be much higher than 1 (c order of magnitude) raise ValueError('''Speed must be greater than or equal to 1!''' ) return velocity / c def SCREAMING_SNAKE_CASE ( lowercase_ ) -> float: """simple docstring""" return 1 / sqrt(1 - beta(lowercase_ ) ** 2 ) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> np.ndarray: """simple docstring""" return np.array( [ [gamma(lowercase_ ), -gamma(lowercase_ ) * beta(lowercase_ ), 0, 0], [-gamma(lowercase_ ) * beta(lowercase_ ), gamma(lowercase_ ), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], ] ) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ = None ) -> np.ndarray: """simple docstring""" if event is None: A__ = np.array([ct, x, y, z] ) # Symbolic four vector else: event[0] *= c # x0 is ct (speed of light * time) return transformation_matrix(lowercase_ ) @ event if __name__ == "__main__": import doctest doctest.testmod() # Example of symbolic vector: _lowerCamelCase : Tuple = transform(29979245) print("""Example of four vector: """) print(F'''ct\' = {four_vector[0]}''') print(F'''x\' = {four_vector[1]}''') print(F'''y\' = {four_vector[2]}''') print(F'''z\' = {four_vector[3]}''') # Substitute symbols with numerical values _lowerCamelCase : int = {ct: c, x: 1, y: 1, z: 1} _lowerCamelCase : Any = [four_vector[i].subs(sub_dict) for i in range(4)] print(F'''\n{numerical_vector}''')
87
1
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: # python ./utils/get_modified_files.py utils src tests examples # # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered # since the output of this script is fed into Makefile commands it doesn't print a newline after the results import re import subprocess import sys _lowerCamelCase : Optional[int] = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""") _lowerCamelCase : Any = subprocess.check_output(F'''git diff --name-only {fork_point_sha}'''.split()).decode("""utf-8""").split() _lowerCamelCase : Optional[int] = """|""".join(sys.argv[1:]) _lowerCamelCase : str = re.compile(rF'''^({joined_dirs}).*?\.py$''') _lowerCamelCase : List[str] = [x for x in modified_files if regex.match(x)] print(""" """.join(relevant_modified_files), end="""""")
87
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> list: """simple docstring""" if len(lowercase_ ) <= 1: return [tuple(lowercase_ )] A__ = [] def generate(lowercase_ , lowercase_ ): if k == 1: res.append(tuple(arr[:] ) ) return generate(k - 1 , lowercase_ ) for i in range(k - 1 ): if k % 2 == 0: # k is even A__ , A__ = arr[k - 1], arr[i] else: # k is odd A__ , A__ = arr[k - 1], arr[0] generate(k - 1 , lowercase_ ) generate(len(lowercase_ ) , lowercase_ ) return res if __name__ == "__main__": _lowerCamelCase : int = input("""Enter numbers separated by a comma:\n""").strip() _lowerCamelCase : str = [int(item) for item in user_input.split(""",""")] print(heaps(arr))
87
1
import html from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...utils import is_bsa_available, logging, requires_backends if is_bsa_available(): import bsa from bsa import BeautifulSoup _lowerCamelCase : Optional[int] = logging.get_logger(__name__) class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : Union[str, Any] , **UpperCAmelCase__ : Optional[int]) ->Optional[int]: '''simple docstring''' requires_backends(self , ['''bs4''']) super().__init__(**UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Dict) ->Optional[Any]: '''simple docstring''' A__ = [] A__ = [] A__ = element if element.name else element.parent for parent in child.parents: # type: bs4.element.Tag A__ = parent.find_all(child.name , recursive=UpperCAmelCase__) xpath_tags.append(child.name) xpath_subscripts.append( 0 if 1 == len(UpperCAmelCase__) else next(i for i, s in enumerate(UpperCAmelCase__ , 1) if s is child)) A__ = parent xpath_tags.reverse() xpath_subscripts.reverse() return xpath_tags, xpath_subscripts def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Dict) ->Optional[int]: '''simple docstring''' A__ = BeautifulSoup(UpperCAmelCase__ , '''html.parser''') A__ = [] A__ = [] A__ = [] for element in html_code.descendants: if type(UpperCAmelCase__) == bsa.element.NavigableString: if type(element.parent) != bsa.element.Tag: continue A__ = html.unescape(UpperCAmelCase__).strip() if not text_in_this_tag: continue all_doc_strings.append(UpperCAmelCase__) A__ , A__ = self.xpath_soup(UpperCAmelCase__) stringaxtag_seq.append(UpperCAmelCase__) stringaxsubs_seq.append(UpperCAmelCase__) if len(UpperCAmelCase__) != len(UpperCAmelCase__): raise ValueError('''Number of doc strings and xtags does not correspond''') if len(UpperCAmelCase__) != len(UpperCAmelCase__): raise ValueError('''Number of doc strings and xsubs does not correspond''') return all_doc_strings, stringaxtag_seq, stringaxsubs_seq def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int]) ->Optional[Any]: '''simple docstring''' A__ = '''''' for tagname, subs in zip(UpperCAmelCase__ , UpperCAmelCase__): xpath += f"""/{tagname}""" if subs != 0: xpath += f"""[{subs}]""" return xpath def __call__( self : Optional[Any] , UpperCAmelCase__ : Tuple) ->BatchFeature: '''simple docstring''' A__ = False # Check that strings has a valid type if isinstance(UpperCAmelCase__ , UpperCAmelCase__): A__ = True elif isinstance(UpperCAmelCase__ , (list, tuple)): if len(UpperCAmelCase__) == 0 or isinstance(html_strings[0] , UpperCAmelCase__): A__ = True if not valid_strings: raise ValueError( '''HTML strings must of type `str`, `List[str]` (batch of examples), ''' f"""but is of type {type(UpperCAmelCase__)}.""") A__ = bool(isinstance(UpperCAmelCase__ , (list, tuple)) and (isinstance(html_strings[0] , UpperCAmelCase__))) if not is_batched: A__ = [html_strings] # Get nodes + xpaths A__ = [] A__ = [] for html_string in html_strings: A__ , A__ , A__ = self.get_three_from_single(UpperCAmelCase__) nodes.append(UpperCAmelCase__) A__ = [] for node, tag_list, sub_list in zip(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__): A__ = self.construct_xpath(UpperCAmelCase__ , UpperCAmelCase__) xpath_strings.append(UpperCAmelCase__) xpaths.append(UpperCAmelCase__) # return as Dict A__ = {'''nodes''': nodes, '''xpaths''': xpaths} A__ = BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__) return encoded_inputs
87
import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def SCREAMING_SNAKE_CASE ( lowercase_ ) -> int: """simple docstring""" return 1.0 / (1.0 + np.exp(-_outputs )) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Union[str, Any]: """simple docstring""" A__ = np.max(_outputs , axis=-1 , keepdims=lowercase_ ) A__ = np.exp(_outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=lowercase_ ) class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''sigmoid''' UpperCAmelCase__ = '''softmax''' UpperCAmelCase__ = '''none''' @add_end_docstrings( UpperCAmelCase__ , R''' return_all_scores (`bool`, *optional*, defaults to `False`): Whether to return all prediction scores or just the one of the predicted class. function_to_apply (`str`, *optional*, defaults to `"default"`): The function to apply to the model outputs in order to retrieve the scores. Accepts four different values: - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model has several labels, will apply the softmax function on the output. - `"sigmoid"`: Applies the sigmoid function on the output. - `"softmax"`: Applies the softmax function on the output. - `"none"`: Does not apply any function on the output. ''' , ) class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = False UpperCAmelCase__ = ClassificationFunction.NONE def __init__( self : Any , **UpperCAmelCase__ : Optional[Any]) ->str: '''simple docstring''' super().__init__(**UpperCAmelCase__) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == '''tf''' else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING) def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : int="" , **UpperCAmelCase__ : Any) ->int: '''simple docstring''' A__ = tokenizer_kwargs A__ = {} if hasattr(self.model.config , '''return_all_scores''') and return_all_scores is None: A__ = self.model.config.return_all_scores if isinstance(UpperCAmelCase__ , UpperCAmelCase__) or top_k is None: A__ = top_k A__ = False elif return_all_scores is not None: warnings.warn( '''`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of''' ''' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.''' , UpperCAmelCase__ , ) if return_all_scores: A__ = None else: A__ = 1 if isinstance(UpperCAmelCase__ , UpperCAmelCase__): A__ = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: A__ = function_to_apply return preprocess_params, {}, postprocess_params def __call__( self : str , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Optional[int]) ->Union[str, Any]: '''simple docstring''' A__ = super().__call__(*UpperCAmelCase__ , **UpperCAmelCase__) # TODO try and retrieve it in a nicer way from _sanitize_parameters. A__ = '''top_k''' not in kwargs if isinstance(args[0] , UpperCAmelCase__) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : Any , **UpperCAmelCase__ : str) ->Dict[str, GenericTensor]: '''simple docstring''' A__ = self.framework if isinstance(UpperCAmelCase__ , UpperCAmelCase__): return self.tokenizer(**UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__) elif isinstance(UpperCAmelCase__ , UpperCAmelCase__) and len(UpperCAmelCase__) == 1 and isinstance(inputs[0] , UpperCAmelCase__) and len(inputs[0]) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__) elif isinstance(UpperCAmelCase__ , UpperCAmelCase__): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( '''The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a''' ''' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.''') return self.tokenizer(UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : Tuple) ->Tuple: '''simple docstring''' return self.model(**UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : List[Any]=1 , UpperCAmelCase__ : str=True) ->Dict: '''simple docstring''' if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: A__ = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: A__ = ClassificationFunction.SOFTMAX elif hasattr(self.model.config , '''function_to_apply''') and function_to_apply is None: A__ = self.model.config.function_to_apply else: A__ = ClassificationFunction.NONE A__ = model_outputs['''logits'''][0] A__ = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: A__ = sigmoid(UpperCAmelCase__) elif function_to_apply == ClassificationFunction.SOFTMAX: A__ = softmax(UpperCAmelCase__) elif function_to_apply == ClassificationFunction.NONE: A__ = outputs else: raise ValueError(f"""Unrecognized `function_to_apply` argument: {function_to_apply}""") if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} A__ = [ {'''label''': self.model.config.idalabel[i], '''score''': score.item()} for i, score in enumerate(UpperCAmelCase__) ] if not _legacy: dict_scores.sort(key=lambda UpperCAmelCase__: x["score"] , reverse=UpperCAmelCase__) if top_k is not None: A__ = dict_scores[:top_k] return dict_scores
87
1
import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available from . import BaseDiffusersCLICommand def SCREAMING_SNAKE_CASE ( lowercase_ ) -> int: """simple docstring""" return EnvironmentCommand() class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' @staticmethod def SCREAMING_SNAKE_CASE ( UpperCAmelCase__ : ArgumentParser) ->int: '''simple docstring''' A__ = parser.add_parser('''env''') download_parser.set_defaults(func=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[str]) ->Tuple: '''simple docstring''' A__ = huggingface_hub.__version__ A__ = '''not installed''' A__ = '''NA''' if is_torch_available(): import torch A__ = torch.__version__ A__ = torch.cuda.is_available() A__ = '''not installed''' if is_transformers_available(): import transformers A__ = transformers.__version__ A__ = '''not installed''' if is_accelerate_available(): import accelerate A__ = accelerate.__version__ A__ = '''not installed''' if is_xformers_available(): import xformers A__ = xformers.__version__ A__ = { '''`diffusers` version''': version, '''Platform''': platform.platform(), '''Python version''': platform.python_version(), '''PyTorch version (GPU?)''': f"""{pt_version} ({pt_cuda_available})""", '''Huggingface_hub version''': hub_version, '''Transformers version''': transformers_version, '''Accelerate version''': accelerate_version, '''xFormers version''': xformers_version, '''Using GPU in script?''': '''<fill in>''', '''Using distributed or parallel set-up in script?''': '''<fill in>''', } print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''') print(self.format_dict(UpperCAmelCase__)) return info @staticmethod def SCREAMING_SNAKE_CASE ( UpperCAmelCase__ : Tuple) ->Tuple: '''simple docstring''' return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()]) + "\n"
87
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _lowerCamelCase : Any = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : int = ["""XLNetTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : int = ["""XLNetTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Union[str, Any] = [ """XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """XLNetForMultipleChoice""", """XLNetForQuestionAnswering""", """XLNetForQuestionAnsweringSimple""", """XLNetForSequenceClassification""", """XLNetForTokenClassification""", """XLNetLMHeadModel""", """XLNetModel""", """XLNetPreTrainedModel""", """load_tf_weights_in_xlnet""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Dict = [ """TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFXLNetForMultipleChoice""", """TFXLNetForQuestionAnsweringSimple""", """TFXLNetForSequenceClassification""", """TFXLNetForTokenClassification""", """TFXLNetLMHeadModel""", """TFXLNetMainLayer""", """TFXLNetModel""", """TFXLNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet import XLNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet_fast import XLNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlnet import ( XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, XLNetForMultipleChoice, XLNetForQuestionAnswering, XLNetForQuestionAnsweringSimple, XLNetForSequenceClassification, XLNetForTokenClassification, XLNetLMHeadModel, XLNetModel, XLNetPreTrainedModel, load_tf_weights_in_xlnet, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlnet import ( TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLNetForMultipleChoice, TFXLNetForQuestionAnsweringSimple, TFXLNetForSequenceClassification, TFXLNetForTokenClassification, TFXLNetLMHeadModel, TFXLNetMainLayer, TFXLNetModel, TFXLNetPreTrainedModel, ) else: import sys _lowerCamelCase : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
87
1
from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCamelCase : Tuple = logging.get_logger(__name__) _lowerCamelCase : Optional[int] = { """microsoft/cvt-13""": """https://huggingface.co/microsoft/cvt-13/resolve/main/config.json""", # See all Cvt models at https://huggingface.co/models?filter=cvt } class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''cvt''' def __init__( self : List[str] , UpperCAmelCase__ : str=3 , UpperCAmelCase__ : int=[7, 3, 3] , UpperCAmelCase__ : List[str]=[4, 2, 2] , UpperCAmelCase__ : Dict=[2, 1, 1] , UpperCAmelCase__ : Optional[Any]=[64, 192, 384] , UpperCAmelCase__ : Tuple=[1, 3, 6] , UpperCAmelCase__ : Optional[int]=[1, 2, 10] , UpperCAmelCase__ : Any=[4.0, 4.0, 4.0] , UpperCAmelCase__ : Optional[Any]=[0.0, 0.0, 0.0] , UpperCAmelCase__ : Dict=[0.0, 0.0, 0.0] , UpperCAmelCase__ : List[Any]=[0.0, 0.0, 0.1] , UpperCAmelCase__ : List[Any]=[True, True, True] , UpperCAmelCase__ : List[Any]=[False, False, True] , UpperCAmelCase__ : Any=["dw_bn", "dw_bn", "dw_bn"] , UpperCAmelCase__ : str=[3, 3, 3] , UpperCAmelCase__ : Optional[int]=[1, 1, 1] , UpperCAmelCase__ : Tuple=[2, 2, 2] , UpperCAmelCase__ : List[Any]=[1, 1, 1] , UpperCAmelCase__ : int=[1, 1, 1] , UpperCAmelCase__ : List[str]=0.02 , UpperCAmelCase__ : Union[str, Any]=1e-12 , **UpperCAmelCase__ : Any , ) ->Optional[Any]: '''simple docstring''' super().__init__(**UpperCAmelCase__) A__ = num_channels A__ = patch_sizes A__ = patch_stride A__ = patch_padding A__ = embed_dim A__ = num_heads A__ = depth A__ = mlp_ratio A__ = attention_drop_rate A__ = drop_rate A__ = drop_path_rate A__ = qkv_bias A__ = cls_token A__ = qkv_projection_method A__ = kernel_qkv A__ = padding_kv A__ = stride_kv A__ = padding_q A__ = stride_q A__ = initializer_range A__ = layer_norm_eps
87
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCamelCase : Optional[Any] = logging.get_logger(__name__) _lowerCamelCase : Union[str, Any] = { """google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""", """google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""", # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 } class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''mobilenet_v1''' def __init__( self : Optional[int] , UpperCAmelCase__ : Optional[int]=3 , UpperCAmelCase__ : Optional[Any]=224 , UpperCAmelCase__ : Optional[int]=1.0 , UpperCAmelCase__ : Optional[int]=8 , UpperCAmelCase__ : Tuple="relu6" , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Dict=0.999 , UpperCAmelCase__ : str=0.02 , UpperCAmelCase__ : Optional[int]=0.001 , **UpperCAmelCase__ : Dict , ) ->List[str]: '''simple docstring''' super().__init__(**UpperCAmelCase__) if depth_multiplier <= 0: raise ValueError('''depth_multiplier must be greater than zero.''') A__ = num_channels A__ = image_size A__ = depth_multiplier A__ = min_depth A__ = hidden_act A__ = tf_padding A__ = classifier_dropout_prob A__ = initializer_range A__ = layer_norm_eps class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = version.parse('''1.11''' ) @property def SCREAMING_SNAKE_CASE ( self : Any) ->Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict([('''pixel_values''', {0: '''batch'''})]) @property def SCREAMING_SNAKE_CASE ( self : List[str]) ->Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task == "image-classification": return OrderedDict([('''logits''', {0: '''batch'''})]) else: return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})]) @property def SCREAMING_SNAKE_CASE ( self : int) ->float: '''simple docstring''' return 1e-4
87
1
import argparse import os import gluonnlp as nlp import mxnet as mx import numpy as np import torch from gluonnlp.base import get_home_dir from gluonnlp.model.bert import BERTEncoder from gluonnlp.model.utils import _load_vocab from gluonnlp.vocab import Vocab from packaging import version from torch import nn from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging if version.parse(nlp.__version__) != version.parse("""0.8.3"""): raise Exception("""requires gluonnlp == 0.8.3""") if version.parse(mx.__version__) != version.parse("""1.5.0"""): raise Exception("""requires mxnet == 1.5.0""") logging.set_verbosity_info() _lowerCamelCase : Dict = logging.get_logger(__name__) _lowerCamelCase : int = """The Nymphenburg Palace is a beautiful palace in Munich!""" def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Optional[int]: """simple docstring""" A__ = { '''attention_cell''': '''multi_head''', '''num_layers''': 4, '''units''': 1_024, '''hidden_size''': 768, '''max_length''': 512, '''num_heads''': 8, '''scaled''': True, '''dropout''': 0.1, '''use_residual''': True, '''embed_size''': 1_024, '''embed_dropout''': 0.1, '''word_embed''': None, '''layer_norm_eps''': 1E-5, '''token_type_vocab_size''': 2, } A__ = bort_4_8_768_1024_hparams # Let's construct the original Bort model here # Taken from official BERT implementation, see: # https://github.com/alexa/bort/blob/master/bort/bort.py A__ = BERTEncoder( attention_cell=predefined_args['''attention_cell'''] , num_layers=predefined_args['''num_layers'''] , units=predefined_args['''units'''] , hidden_size=predefined_args['''hidden_size'''] , max_length=predefined_args['''max_length'''] , num_heads=predefined_args['''num_heads'''] , scaled=predefined_args['''scaled'''] , dropout=predefined_args['''dropout'''] , output_attention=lowercase_ , output_all_encodings=lowercase_ , use_residual=predefined_args['''use_residual'''] , activation=predefined_args.get('''activation''' , '''gelu''' ) , layer_norm_eps=predefined_args.get('''layer_norm_eps''' , lowercase_ ) , ) # Vocab information needs to be fetched first # It's the same as RoBERTa, so RobertaTokenizer can be used later A__ = '''openwebtext_ccnews_stories_books_cased''' # Specify download folder to Gluonnlp's vocab A__ = os.path.join(get_home_dir() , '''models''' ) A__ = _load_vocab(lowercase_ , lowercase_ , lowercase_ , cls=lowercase_ ) A__ = nlp.model.BERTModel( lowercase_ , len(lowercase_ ) , units=predefined_args['''units'''] , embed_size=predefined_args['''embed_size'''] , embed_dropout=predefined_args['''embed_dropout'''] , word_embed=predefined_args['''word_embed'''] , use_pooler=lowercase_ , use_token_type_embed=lowercase_ , token_type_vocab_size=predefined_args['''token_type_vocab_size'''] , use_classifier=lowercase_ , use_decoder=lowercase_ , ) original_bort.load_parameters(lowercase_ , cast_dtype=lowercase_ , ignore_extra=lowercase_ ) A__ = original_bort._collect_params_with_prefix() # Build our config 🤗 A__ = { '''architectures''': ['''BertForMaskedLM'''], '''attention_probs_dropout_prob''': predefined_args['''dropout'''], '''hidden_act''': '''gelu''', '''hidden_dropout_prob''': predefined_args['''dropout'''], '''hidden_size''': predefined_args['''embed_size'''], '''initializer_range''': 0.02, '''intermediate_size''': predefined_args['''hidden_size'''], '''layer_norm_eps''': predefined_args['''layer_norm_eps'''], '''max_position_embeddings''': predefined_args['''max_length'''], '''model_type''': '''bort''', '''num_attention_heads''': predefined_args['''num_heads'''], '''num_hidden_layers''': predefined_args['''num_layers'''], '''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa '''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa '''vocab_size''': len(lowercase_ ), } A__ = BertConfig.from_dict(lowercase_ ) A__ = BertForMaskedLM(lowercase_ ) hf_bort_model.eval() # Parameter mapping table (Gluonnlp to Transformers) # * denotes layer index # # | Gluon Parameter | Transformers Parameter # | -------------------------------------------------------------- | ---------------------- # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias` # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight` # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight` # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight` # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias` # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight` # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias` # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight` # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias` # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight` # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight` # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias` # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight` # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight` # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias` # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight` # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias` # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight` # Helper function to convert MXNET Arrays to PyTorch def to_torch(lowercase_ ) -> nn.Parameter: return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) ) # Check param shapes and map new HF param back def check_and_map_params(lowercase_ , lowercase_ ): A__ = hf_param.shape A__ = to_torch(params[gluon_param] ) A__ = gluon_param.shape assert ( shape_hf == shape_gluon ), f"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers""" return gluon_param A__ = check_and_map_params( hf_bort_model.bert.embeddings.word_embeddings.weight , '''word_embed.0.weight''' ) A__ = check_and_map_params( hf_bort_model.bert.embeddings.position_embeddings.weight , '''encoder.position_weight''' ) A__ = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.bias , '''encoder.layer_norm.beta''' ) A__ = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.weight , '''encoder.layer_norm.gamma''' ) # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them) A__ = torch.zeros_like( hf_bort_model.bert.embeddings.token_type_embeddings.weight.data ) for i in range(hf_bort_config.num_hidden_layers ): A__ = hf_bort_model.bert.encoder.layer[i] # self attention A__ = layer.attention.self A__ = check_and_map_params( self_attn.key.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.bias""" ) A__ = check_and_map_params( self_attn.key.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.weight""" ) A__ = check_and_map_params( self_attn.query.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.bias""" ) A__ = check_and_map_params( self_attn.query.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.weight""" ) A__ = check_and_map_params( self_attn.value.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.bias""" ) A__ = check_and_map_params( self_attn.value.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.weight""" ) # self attention output A__ = layer.attention.output A__ = check_and_map_params( self_output.dense.bias , f"""encoder.transformer_cells.{i}.proj.bias""" ) A__ = check_and_map_params( self_output.dense.weight , f"""encoder.transformer_cells.{i}.proj.weight""" ) A__ = check_and_map_params( self_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.layer_norm.beta""" ) A__ = check_and_map_params( self_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.layer_norm.gamma""" ) # intermediate A__ = layer.intermediate A__ = check_and_map_params( intermediate.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_1.bias""" ) A__ = check_and_map_params( intermediate.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_1.weight""" ) # output A__ = layer.output A__ = check_and_map_params( bert_output.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_2.bias""" ) A__ = check_and_map_params( bert_output.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_2.weight""" ) A__ = check_and_map_params( bert_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.ffn.layer_norm.beta""" ) A__ = check_and_map_params( bert_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.ffn.layer_norm.gamma""" ) # Save space and energy 🎄 hf_bort_model.half() # Compare output of both models A__ = RobertaTokenizer.from_pretrained('''roberta-base''' ) A__ = tokenizer.encode_plus(lowercase_ )['''input_ids'''] # Get gluon output A__ = mx.nd.array([input_ids] ) A__ = original_bort(inputs=lowercase_ , token_types=[] ) # Get Transformer output (save and reload model again) hf_bort_model.save_pretrained(lowercase_ ) A__ = BertModel.from_pretrained(lowercase_ ) hf_bort_model.eval() A__ = tokenizer.encode_plus(lowercase_ , return_tensors='''pt''' ) A__ = hf_bort_model(**lowercase_ )[0] A__ = output_gluon[0].asnumpy() A__ = output_hf[0].detach().numpy() A__ = np.max(np.abs(hf_layer - gluon_layer ) ).item() A__ = np.allclose(lowercase_ , lowercase_ , atol=1E-3 ) if success: print('''✔️ Both model do output the same tensors''' ) else: print('''❌ Both model do **NOT** output the same tensors''' ) print('''Absolute difference is:''' , lowercase_ ) if __name__ == "__main__": _lowerCamelCase : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--bort_checkpoint_path""", default=None, type=str, required=True, help="""Path the official Bort params file.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) _lowerCamelCase : str = parser.parse_args() convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
87
import unittest from pathlib import Path from shutil import copyfile from transformers import SPIECE_UNDERLINE, is_sentencepiece_available from transformers.models.speech_to_text import SpeechaTextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin _lowerCamelCase : Dict = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_sentencepiece_available(): import sentencepiece as sp _lowerCamelCase : str = 5 _lowerCamelCase : int = 10 @require_sentencepiece @require_tokenizers class UpperCamelCase_ ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = SpeechaTextTokenizer UpperCAmelCase__ = False UpperCAmelCase__ = True def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[str]: '''simple docstring''' super().setUp() A__ = sp.SentencePieceProcessor() spm_model.Load(UpperCAmelCase__) A__ = ['''<s>''', '''<pad>''', '''</s>''', '''<unk>'''] vocab += [spm_model.IdToPiece(id_) for id_ in range(len(UpperCAmelCase__))] A__ = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__)))) A__ = Path(self.tmpdirname) save_json(UpperCAmelCase__ , save_dir / VOCAB_FILES_NAMES['''vocab_file''']) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(UpperCAmelCase__ , save_dir / VOCAB_FILES_NAMES['''spm_file''']) A__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname) tokenizer.save_pretrained(self.tmpdirname) def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[int]: '''simple docstring''' A__ = '''<pad>''' A__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__) , UpperCAmelCase__) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__) , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any: '''simple docstring''' A__ = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , '''<s>''') self.assertEqual(vocab_keys[1] , '''<pad>''') self.assertEqual(vocab_keys[-1] , '''j''') self.assertEqual(len(UpperCAmelCase__) , 1_001) def SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[str]: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1_001) def SCREAMING_SNAKE_CASE ( self : int) ->List[str]: '''simple docstring''' A__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname) A__ = tokenizer.tokenize('''This is a test''') self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase__) , [289, 50, 14, 174, 386] , ) A__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''') self.assertListEqual( UpperCAmelCase__ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , ) A__ = tokenizer.convert_tokens_to_ids(UpperCAmelCase__) self.assertListEqual(UpperCAmelCase__ , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8]) A__ = tokenizer.convert_ids_to_tokens(UpperCAmelCase__) self.assertListEqual( UpperCAmelCase__ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , ) @slow def SCREAMING_SNAKE_CASE ( self : int) ->List[Any]: '''simple docstring''' A__ = {'''input_ids''': [[3_791, 797, 31, 11, 64, 797, 31, 2_429, 433, 12, 1_176, 12, 20, 786, 915, 142, 2_413, 240, 37, 3_238, 797, 31, 11, 35, 93, 915, 142, 2_413, 240, 37, 5_540, 567, 1_276, 93, 37, 610, 40, 62, 455, 657, 1_042, 123, 780, 177, 37, 309, 241, 1_298, 514, 20, 292, 2_737, 114, 2_469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3_388, 511, 459, 4, 3_555, 40, 321, 302, 705, 4, 3_388, 511, 583, 326, 5, 5, 5, 62, 3_310, 560, 177, 2_680, 217, 1_508, 32, 31, 853, 418, 64, 583, 511, 1_605, 62, 35, 93, 560, 177, 2_680, 217, 1_508, 1_521, 64, 583, 511, 519, 62, 20, 1_515, 764, 20, 149, 261, 5_625, 7_972, 20, 5_540, 567, 1_276, 93, 3_925, 1_675, 11, 15, 802, 7_972, 576, 217, 1_508, 11, 35, 93, 1_253, 2_441, 15, 289, 652, 31, 416, 321, 3_842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2_681, 1_153, 3_434, 20, 5_540, 37, 567, 126, 1_253, 2_441, 3_376, 449, 210, 431, 1_563, 177, 767, 5_540, 11, 1_203, 472, 11, 2_953, 685, 285, 364, 706, 1_153, 20, 6_799, 20, 2_869, 20, 4_464, 126, 40, 2_429, 20, 1_040, 866, 2_664, 418, 20, 318, 20, 1_726, 186, 20, 265, 522, 35, 93, 2_191, 4_634, 20, 1_040, 12, 6_799, 15, 228, 2_356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_575, 2_666, 684, 1_582, 1_176, 12, 627, 149, 619, 20, 4_902, 563, 11, 20, 149, 261, 3_420, 2_356, 174, 142, 4_714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase__ , model_name='''facebook/s2t-small-mustc-en-de-st''' , revision='''a14f04cf0776c02f62a8cb800cf7909e15ea23ad''' , ) @require_sentencepiece class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = '''valhalla/s2t_mustc_multilinguial_medium''' UpperCAmelCase__ = '''C\'est trop cool''' UpperCAmelCase__ = '''Esto es genial''' @classmethod def SCREAMING_SNAKE_CASE ( cls : Dict) ->Dict: '''simple docstring''' A__ = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name) return cls def SCREAMING_SNAKE_CASE ( self : str) ->Dict: '''simple docstring''' self.assertEqual(self.tokenizer.lang_code_to_id['''pt'''] , 4) self.assertEqual(self.tokenizer.lang_code_to_id['''ru'''] , 6) self.assertEqual(self.tokenizer.lang_code_to_id['''it'''] , 9) self.assertEqual(self.tokenizer.lang_code_to_id['''de'''] , 11) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Dict: '''simple docstring''' self.assertEqual(self.tokenizer.vocab_size , 10_000) def SCREAMING_SNAKE_CASE ( self : Dict) ->Union[str, Any]: '''simple docstring''' self.assertIn(UpperCAmelCase__ , self.tokenizer.all_special_ids) A__ = [ES_CODE, 4, 1_601, 47, 7_647, 2] A__ = self.tokenizer.decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__) A__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCAmelCase__) self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__) self.assertNotIn(self.tokenizer.eos_token , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : int) ->str: '''simple docstring''' A__ = '''fr''' A__ = self.tokenizer(self.french_text).input_ids self.assertEqual(encoded[0] , UpperCAmelCase__) self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->int: '''simple docstring''' A__ = '''fr''' self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE]) A__ = '''es''' self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE])
87
1
import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = JukeboxTokenizer UpperCAmelCase__ = { '''artist''': '''Zac Brown Band''', '''genres''': '''Country''', '''lyrics''': '''I met a traveller from an antique land, Who said "Two vast and trunkless legs of stone Stand in the desert. . . . Near them, on the sand, Half sunk a shattered visage lies, whose frown, And wrinkled lip, and sneer of cold command, Tell that its sculptor well those passions read Which yet survive, stamped on these lifeless things, The hand that mocked them, and the heart that fed; And on the pedestal, these words appear: My name is Ozymandias, King of Kings; Look on my Works, ye Mighty, and despair! Nothing beside remains. Round the decay Of that colossal Wreck, boundless and bare The lone and level sands stretch far away ''', } @require_torch def SCREAMING_SNAKE_CASE ( self : Dict) ->int: '''simple docstring''' import torch A__ = JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''') A__ = tokenizer(**self.metas)['''input_ids'''] # fmt: off A__ = [ torch.tensor([[ 0, 0, 0, 7_169, 507, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67, 78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]]), torch.tensor([[0, 0, 0, 1_069, 11]]), torch.tensor([[0, 0, 0, 1_069, 11]]), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0])) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1])) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2])) @require_torch def SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[int]: '''simple docstring''' import torch A__ = JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''') A__ = tokenizer(**self.metas)['''input_ids'''] # fmt: off A__ = [ torch.tensor([[ 0, 0, 0, 1_069, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]]), torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]]), torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]]), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0])) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1])) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2]))
87
from __future__ import annotations import requests def SCREAMING_SNAKE_CASE ( lowercase_ ) -> dict: """simple docstring""" A__ = f"""https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty""" return requests.get(lowercase_ ).json() def SCREAMING_SNAKE_CASE ( lowercase_ = 10 ) -> list[dict]: """simple docstring""" A__ = '''https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty''' A__ = requests.get(lowercase_ ).json()[:max_stories] return [get_hackernews_story(lowercase_ ) for story_id in story_ids] def SCREAMING_SNAKE_CASE ( lowercase_ = 10 ) -> str: """simple docstring""" A__ = hackernews_top_stories(lowercase_ ) return "\n".join('''* [{title}]({url})'''.format(**lowercase_ ) for story in stories ) if __name__ == "__main__": print(hackernews_top_stories_as_markdown())
87
1
from __future__ import annotations from math import pow, sqrt def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> dict[str, float]: """simple docstring""" if (resistance, reactance, impedance).count(0 ) != 1: raise ValueError('''One and only one argument must be 0''' ) if resistance == 0: return {"resistance": sqrt(pow(lowercase_ , 2 ) - pow(lowercase_ , 2 ) )} elif reactance == 0: return {"reactance": sqrt(pow(lowercase_ , 2 ) - pow(lowercase_ , 2 ) )} elif impedance == 0: return {"impedance": sqrt(pow(lowercase_ , 2 ) + pow(lowercase_ , 2 ) )} else: raise ValueError('''Exactly one argument must be 0''' ) if __name__ == "__main__": import doctest doctest.testmod()
87
import os import sys import warnings from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen from ..table import array_cast from ..utils.file_utils import is_local_path from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: import PIL.Image from .features import FeatureType _lowerCamelCase : Optional[List[str]] = None _lowerCamelCase : int = """<""" if sys.byteorder == """little""" else """>""" # Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image _lowerCamelCase : Union[str, Any] = [ np.dtype("""|b1"""), np.dtype("""|u1"""), np.dtype("""<u2"""), np.dtype(""">u2"""), np.dtype("""<i2"""), np.dtype(""">i2"""), np.dtype("""<u4"""), np.dtype(""">u4"""), np.dtype("""<i4"""), np.dtype(""">i4"""), np.dtype("""<f4"""), np.dtype(""">f4"""), np.dtype("""<f8"""), np.dtype(""">f8"""), ] @dataclass class UpperCamelCase_ : '''simple docstring''' UpperCAmelCase__ = True UpperCAmelCase__ = None # Automatically constructed UpperCAmelCase__ = "PIL.Image.Image" UpperCAmelCase__ = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} ) UpperCAmelCase__ = field(default='''Image''' , init=UpperCAmelCase__ , repr=UpperCAmelCase__ ) def __call__( self : List[str]) ->List[str]: '''simple docstring''' return self.pa_type def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) ->dict: '''simple docstring''' if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support encoding images, please install \'Pillow\'.''') if isinstance(UpperCAmelCase__ , UpperCAmelCase__): A__ = np.array(UpperCAmelCase__) if isinstance(UpperCAmelCase__ , UpperCAmelCase__): return {"path": value, "bytes": None} elif isinstance(UpperCAmelCase__ , UpperCAmelCase__): return {"path": None, "bytes": value} elif isinstance(UpperCAmelCase__ , np.ndarray): # convert the image array to PNG/TIFF bytes return encode_np_array(UpperCAmelCase__) elif isinstance(UpperCAmelCase__ , PIL.Image.Image): # convert the PIL image to bytes (default format is PNG/TIFF) return encode_pil_image(UpperCAmelCase__) elif value.get('''path''') is not None and os.path.isfile(value['''path''']): # we set "bytes": None to not duplicate the data if they're already available locally return {"bytes": None, "path": value.get('''path''')} elif value.get('''bytes''') is not None or value.get('''path''') is not None: # store the image bytes, and path is used to infer the image format using the file extension return {"bytes": value.get('''bytes'''), "path": value.get('''path''')} else: raise ValueError( f"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""") def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : dict , UpperCAmelCase__ : str=None) ->"PIL.Image.Image": '''simple docstring''' if not self.decode: raise RuntimeError('''Decoding is disabled for this feature. Please use Image(decode=True) instead.''') if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support decoding images, please install \'Pillow\'.''') if token_per_repo_id is None: A__ = {} A__ , A__ = value['''path'''], value['''bytes'''] if bytes_ is None: if path is None: raise ValueError(f"""An image should have one of 'path' or 'bytes' but both are None in {value}.""") else: if is_local_path(UpperCAmelCase__): A__ = PIL.Image.open(UpperCAmelCase__) else: A__ = path.split('''::''')[-1] try: A__ = string_to_dict(UpperCAmelCase__ , config.HUB_DATASETS_URL)['''repo_id'''] A__ = token_per_repo_id.get(UpperCAmelCase__) except ValueError: A__ = None with xopen(UpperCAmelCase__ , '''rb''' , use_auth_token=UpperCAmelCase__) as f: A__ = BytesIO(f.read()) A__ = PIL.Image.open(bytes_) else: A__ = PIL.Image.open(BytesIO(bytes_)) image.load() # to avoid "Too many open files" errors return image def SCREAMING_SNAKE_CASE ( self : Dict) ->Union["FeatureType", Dict[str, "FeatureType"]]: '''simple docstring''' from .features import Value return ( self if self.decode else { "bytes": Value('''binary'''), "path": Value('''string'''), } ) def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Union[pa.StringArray, pa.StructArray, pa.ListArray]) ->pa.StructArray: '''simple docstring''' if pa.types.is_string(storage.type): A__ = pa.array([None] * len(UpperCAmelCase__) , type=pa.binary()) A__ = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null()) elif pa.types.is_binary(storage.type): A__ = pa.array([None] * len(UpperCAmelCase__) , type=pa.string()) A__ = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null()) elif pa.types.is_struct(storage.type): if storage.type.get_field_index('''bytes''') >= 0: A__ = storage.field('''bytes''') else: A__ = pa.array([None] * len(UpperCAmelCase__) , type=pa.binary()) if storage.type.get_field_index('''path''') >= 0: A__ = storage.field('''path''') else: A__ = pa.array([None] * len(UpperCAmelCase__) , type=pa.string()) A__ = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null()) elif pa.types.is_list(storage.type): A__ = pa.array( [encode_np_array(np.array(UpperCAmelCase__))['''bytes'''] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , ) A__ = pa.array([None] * len(UpperCAmelCase__) , type=pa.string()) A__ = pa.StructArray.from_arrays( [bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null()) return array_cast(UpperCAmelCase__ , self.pa_type) def SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase__ : pa.StructArray) ->pa.StructArray: '''simple docstring''' @no_op_if_value_is_null def path_to_bytes(UpperCAmelCase__ : Dict): with xopen(UpperCAmelCase__ , '''rb''') as f: A__ = f.read() return bytes_ A__ = pa.array( [ (path_to_bytes(x['''path''']) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) A__ = pa.array( [os.path.basename(UpperCAmelCase__) if path is not None else None for path in storage.field('''path''').to_pylist()] , type=pa.string() , ) A__ = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null()) return array_cast(UpperCAmelCase__ , self.pa_type) def SCREAMING_SNAKE_CASE ( ) -> List[str]: """simple docstring""" if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support encoding images, please install \'Pillow\'.''' ) global _IMAGE_COMPRESSION_FORMATS if _IMAGE_COMPRESSION_FORMATS is None: PIL.Image.init() A__ = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) ) return _IMAGE_COMPRESSION_FORMATS def SCREAMING_SNAKE_CASE ( lowercase_ ) -> bytes: """simple docstring""" A__ = BytesIO() if image.format in list_image_compression_formats(): A__ = image.format else: A__ = '''PNG''' if image.mode in ['''1''', '''L''', '''LA''', '''RGB''', '''RGBA'''] else '''TIFF''' image.save(lowercase_ , format=lowercase_ ) return buffer.getvalue() def SCREAMING_SNAKE_CASE ( lowercase_ ) -> dict: """simple docstring""" if hasattr(lowercase_ , '''filename''' ) and image.filename != "": return {"path": image.filename, "bytes": None} else: return {"path": None, "bytes": image_to_bytes(lowercase_ )} def SCREAMING_SNAKE_CASE ( lowercase_ ) -> dict: """simple docstring""" if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support encoding images, please install \'Pillow\'.''' ) A__ = array.dtype A__ = dtype.byteorder if dtype.byteorder != '''=''' else _NATIVE_BYTEORDER A__ = dtype.kind A__ = dtype.itemsize A__ = None # Multi-channel array case (only np.dtype("|u1") is allowed) if array.shape[2:]: A__ = np.dtype('''|u1''' ) if dtype_kind not in ["u", "i"]: raise TypeError( f"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.""" ) if dtype is not dest_dtype: warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" ) # Exact match elif dtype in _VALID_IMAGE_ARRAY_DTPYES: A__ = dtype else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually) while dtype_itemsize >= 1: A__ = dtype_byteorder + dtype_kind + str(lowercase_ ) A__ = np.dtype(lowercase_ ) if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES: warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" ) break else: dtype_itemsize //= 2 if dest_dtype is None: raise TypeError( f"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}""" ) A__ = PIL.Image.fromarray(array.astype(lowercase_ ) ) return {"path": None, "bytes": image_to_bytes(lowercase_ )} def SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[dict]: """simple docstring""" if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support encoding images, please install \'Pillow\'.''' ) if objs: A__ , A__ = first_non_null_value(lowercase_ ) if isinstance(lowercase_ , lowercase_ ): return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs] if isinstance(lowercase_ , np.ndarray ): A__ = no_op_if_value_is_null(lowercase_ ) return [obj_to_image_dict_func(lowercase_ ) for obj in objs] elif isinstance(lowercase_ , PIL.Image.Image ): A__ = no_op_if_value_is_null(lowercase_ ) return [obj_to_image_dict_func(lowercase_ ) for obj in objs] else: return objs else: return objs
87
1
import unittest from transformers import GPTSwaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin _lowerCamelCase : Optional[Any] = get_tests_dir("""fixtures/test_sentencepiece_with_bytefallback.model""") @require_sentencepiece @require_tokenizers class UpperCamelCase_ ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = GPTSwaTokenizer UpperCAmelCase__ = False UpperCAmelCase__ = True UpperCAmelCase__ = False def SCREAMING_SNAKE_CASE ( self : int) ->Tuple: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing A__ = GPTSwaTokenizer(UpperCAmelCase__ , eos_token='''<unk>''' , bos_token='''<unk>''' , pad_token='''<unk>''') tokenizer.save_pretrained(self.tmpdirname) def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : List[Any]) ->Optional[Any]: '''simple docstring''' A__ = '''This is a test''' A__ = '''This is a test''' return input_text, output_text def SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]: '''simple docstring''' A__ = '''<s>''' A__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__) , UpperCAmelCase__) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__) , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Dict: '''simple docstring''' A__ = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , '''<unk>''') self.assertEqual(vocab_keys[1] , '''<s>''') self.assertEqual(vocab_keys[-1] , '''j''') self.assertEqual(len(UpperCAmelCase__) , 2_000) def SCREAMING_SNAKE_CASE ( self : int) ->Dict: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 2_000) def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]: '''simple docstring''' A__ = GPTSwaTokenizer(UpperCAmelCase__) A__ = tokenizer.tokenize('''This is a test''') self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__) , [465, 287, 265, 631, 842]) A__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''') # fmt: off self.assertListEqual( UpperCAmelCase__ , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] , ) # fmt: on A__ = tokenizer.convert_tokens_to_ids(UpperCAmelCase__) self.assertListEqual( UpperCAmelCase__ , [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , ) A__ = tokenizer.convert_ids_to_tokens(UpperCAmelCase__) # fmt: off self.assertListEqual( UpperCAmelCase__ , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.''']) # fmt: on def SCREAMING_SNAKE_CASE ( self : str) ->List[Any]: '''simple docstring''' A__ = GPTSwaTokenizer(UpperCAmelCase__) A__ = ['''This is a test''', '''I was born in 92000, and this is falsé.'''] A__ = [ [465, 287, 265, 631, 842], [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260], ] # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids for text, expected_ids in zip(UpperCAmelCase__ , UpperCAmelCase__): self.assertListEqual(tokenizer.encode_fast(UpperCAmelCase__) , UpperCAmelCase__) # Test that decode_fast returns the input text for text, token_ids in zip(UpperCAmelCase__ , UpperCAmelCase__): self.assertEqual(tokenizer.decode_fast(UpperCAmelCase__) , UpperCAmelCase__) @slow def SCREAMING_SNAKE_CASE ( self : Any) ->int: '''simple docstring''' A__ = [ '''<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')''', '''Hey there, how are you doing this fine day?''', '''This is a text with a trailing spaces followed by a dot .''', '''Häj sväjs lillebrör! =)''', '''Det är inget fel på Mr. Cool''', ] # fmt: off A__ = {'''input_ids''': [[63_423, 5, 6_811, 14_954, 282, 816, 3_821, 63_466, 63_425, 63_462, 18, 63_978, 678, 301, 1_320, 63_423, 63_455, 63_458, 18, 63_982, 4_246, 3_940, 1_901, 47_789, 5_547, 18_994], [19_630, 1_100, 63_446, 1_342, 633, 544, 4_488, 593, 5_102, 2_416, 63_495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_652, 428, 268, 1_936, 515, 268, 58_593, 22_413, 9_106, 546, 268, 33_213, 63_979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55_130, 63_450, 924, 63_449, 2_249, 4_062, 1_558, 318, 63_504, 21_498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2_827, 2_559, 332, 6_575, 63_443, 26_801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase__ , model_name='''AI-Sweden/gpt-sw3-126m''' , sequences=UpperCAmelCase__ , )
87
from __future__ import annotations import unittest from transformers import MobileBertConfig, is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_MODEL_FOR_PRETRAINING_MAPPING, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertModel, ) @require_tf class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = ( ( TFMobileBertModel, TFMobileBertForMaskedLM, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertForMultipleChoice, ) if is_tf_available() else () ) UpperCAmelCase__ = ( { '''feature-extraction''': TFMobileBertModel, '''fill-mask''': TFMobileBertForMaskedLM, '''question-answering''': TFMobileBertForQuestionAnswering, '''text-classification''': TFMobileBertForSequenceClassification, '''token-classification''': TFMobileBertForTokenClassification, '''zero-shot''': TFMobileBertForSequenceClassification, } if is_tf_available() else {} ) UpperCAmelCase__ = False UpperCAmelCase__ = False def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str=False) ->Optional[Any]: '''simple docstring''' A__ = super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__) if return_labels: if model_class in get_values(UpperCAmelCase__): A__ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa) return inputs_dict class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : int=13 , UpperCAmelCase__ : str=7 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : str=99 , UpperCAmelCase__ : List[str]=32 , UpperCAmelCase__ : Optional[int]=32 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : List[str]=4 , UpperCAmelCase__ : Optional[Any]=37 , UpperCAmelCase__ : Optional[int]="gelu" , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : List[Any]=512 , UpperCAmelCase__ : Tuple=16 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : Dict=0.02 , UpperCAmelCase__ : int=3 , UpperCAmelCase__ : List[str]=4 , UpperCAmelCase__ : Tuple=None , ) ->Any: '''simple docstring''' A__ = parent A__ = batch_size A__ = seq_length A__ = is_training A__ = use_input_mask A__ = use_token_type_ids A__ = use_labels A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = type_vocab_size A__ = type_sequence_label_size A__ = initializer_range A__ = num_labels A__ = num_choices A__ = scope A__ = embedding_size def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Tuple: '''simple docstring''' A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) A__ = None if self.use_input_mask: A__ = random_attention_mask([self.batch_size, self.seq_length]) A__ = None if self.use_token_type_ids: A__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) A__ = None A__ = None A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size) A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) A__ = ids_tensor([self.batch_size] , self.num_choices) A__ = MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[Any]) ->Any: '''simple docstring''' A__ = TFMobileBertModel(config=UpperCAmelCase__) A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A__ = model(UpperCAmelCase__) A__ = [input_ids, input_mask] A__ = model(UpperCAmelCase__) A__ = model(UpperCAmelCase__) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size)) def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple) ->Optional[Any]: '''simple docstring''' A__ = TFMobileBertForMaskedLM(config=UpperCAmelCase__) A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A__ = model(UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any]) ->int: '''simple docstring''' A__ = TFMobileBertForNextSentencePrediction(config=UpperCAmelCase__) A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A__ = model(UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2)) def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int) ->List[Any]: '''simple docstring''' A__ = TFMobileBertForPreTraining(config=UpperCAmelCase__) A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A__ = model(UpperCAmelCase__) self.parent.assertEqual( result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2)) def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple) ->Dict: '''simple docstring''' A__ = self.num_labels A__ = TFMobileBertForSequenceClassification(config=UpperCAmelCase__) A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A__ = model(UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : int) ->Dict: '''simple docstring''' A__ = self.num_choices A__ = TFMobileBertForMultipleChoice(config=UpperCAmelCase__) A__ = tf.tile(tf.expand_dims(UpperCAmelCase__ , 1) , (1, self.num_choices, 1)) A__ = tf.tile(tf.expand_dims(UpperCAmelCase__ , 1) , (1, self.num_choices, 1)) A__ = tf.tile(tf.expand_dims(UpperCAmelCase__ , 1) , (1, self.num_choices, 1)) A__ = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, '''token_type_ids''': multiple_choice_token_type_ids, } A__ = model(UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int]) ->int: '''simple docstring''' A__ = self.num_labels A__ = TFMobileBertForTokenClassification(config=UpperCAmelCase__) A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A__ = model(UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any]) ->Union[str, Any]: '''simple docstring''' A__ = TFMobileBertForQuestionAnswering(config=UpperCAmelCase__) A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A__ = model(UpperCAmelCase__) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def SCREAMING_SNAKE_CASE ( self : Any) ->str: '''simple docstring''' A__ = self.prepare_config_and_inputs() ( ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ) = config_and_inputs A__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict def SCREAMING_SNAKE_CASE ( self : Any) ->Union[str, Any]: '''simple docstring''' A__ = TFMobileBertModelTest.TFMobileBertModelTester(self) A__ = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37) def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[int]: '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[str]) ->Tuple: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Tuple) ->Dict: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[int]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Tuple) ->List[str]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*UpperCAmelCase__) @slow def SCREAMING_SNAKE_CASE ( self : str) ->List[Any]: '''simple docstring''' for model_name in ["google/mobilebert-uncased"]: A__ = TFMobileBertModel.from_pretrained(UpperCAmelCase__) self.assertIsNotNone(UpperCAmelCase__) @require_tf class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Any: '''simple docstring''' A__ = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''') A__ = tf.constant([[0, 1, 2, 3, 4, 5]]) A__ = model(UpperCAmelCase__)[0] A__ = [1, 6, 30_522] self.assertEqual(output.shape , UpperCAmelCase__) A__ = tf.constant( [ [ [-4.5919547, -9.248295, -9.645256], [-6.7306175, -6.440284, -6.6052837], [-7.2743506, -6.7847915, -6.024673], ] ]) tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase__ , atol=1e-4)
87
1
from __future__ import annotations import random import unittest from transformers import TransfoXLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLModel, ) class UpperCamelCase_ : '''simple docstring''' def __init__( self : Optional[int] , UpperCAmelCase__ : Optional[int] , ) ->List[Any]: '''simple docstring''' A__ = parent A__ = 13 A__ = 7 A__ = 30 A__ = self.seq_length + self.mem_len A__ = 15 A__ = True A__ = True A__ = 99 A__ = [10, 50, 80] A__ = 32 A__ = 32 A__ = 4 A__ = 8 A__ = 128 A__ = 2 A__ = 2 A__ = None A__ = 1 A__ = 0 A__ = 3 A__ = self.vocab_size - 1 A__ = 0.01 def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Any: '''simple docstring''' A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) A__ = TransfoXLConfig( vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , ) return (config, input_ids_a, input_ids_a, lm_labels) def SCREAMING_SNAKE_CASE ( self : str) ->Optional[Any]: '''simple docstring''' random.seed(self.seed) tf.random.set_seed(self.seed) def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any]) ->Optional[int]: '''simple docstring''' A__ = TFTransfoXLModel(UpperCAmelCase__) A__ , A__ = model(UpperCAmelCase__).to_tuple() A__ = {'''input_ids''': input_ids_a, '''mems''': mems_a} A__ , A__ = model(UpperCAmelCase__).to_tuple() self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple) ->Optional[Any]: '''simple docstring''' A__ = TFTransfoXLLMHeadModel(UpperCAmelCase__) A__ , A__ = model(UpperCAmelCase__).to_tuple() A__ = {'''input_ids''': input_ids_a, '''labels''': lm_labels} A__ , A__ = model(UpperCAmelCase__).to_tuple() A__ , A__ = model([input_ids_a, mems_a]).to_tuple() A__ = {'''input_ids''': input_ids_a, '''mems''': mems_a, '''labels''': lm_labels} A__ , A__ = model(UpperCAmelCase__).to_tuple() self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int]) ->str: '''simple docstring''' A__ = TFTransfoXLForSequenceClassification(UpperCAmelCase__) A__ = model(UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]: '''simple docstring''' A__ = self.prepare_config_and_inputs() ((A__) , (A__) , (A__) , (A__)) = config_and_inputs A__ = {'''input_ids''': input_ids_a} return config, inputs_dict @require_tf class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = ( (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () ) UpperCAmelCase__ = () if is_tf_available() else () UpperCAmelCase__ = ( { '''feature-extraction''': TFTransfoXLModel, '''text-classification''': TFTransfoXLForSequenceClassification, '''text-generation''': TFTransfoXLLMHeadModel, '''zero-shot''': TFTransfoXLForSequenceClassification, } if is_tf_available() else {} ) # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any]) ->Dict: '''simple docstring''' if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. return True return False def SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[str]: '''simple docstring''' A__ = TFTransfoXLModelTester(self) A__ = ConfigTester(self , config_class=UpperCAmelCase__ , d_embed=37) def SCREAMING_SNAKE_CASE ( self : Tuple) ->Tuple: '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->str: '''simple docstring''' self.model_tester.set_seed() A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_model(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]: '''simple docstring''' self.model_tester.set_seed() A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : str) ->List[Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Dict) ->Tuple: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = [TFTransfoXLForSequenceClassification] for model_class in self.all_model_classes: A__ = model_class(UpperCAmelCase__) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer) if model_class in list_other_models_with_output_ebd: A__ = model.get_output_embeddings() assert isinstance(UpperCAmelCase__ , tf.keras.layers.Layer) A__ = model.get_bias() assert name is None else: A__ = model.get_output_embeddings() assert x is None A__ = model.get_bias() assert name is None def SCREAMING_SNAKE_CASE ( self : Any) ->int: '''simple docstring''' pass @slow def SCREAMING_SNAKE_CASE ( self : Dict) ->Any: '''simple docstring''' for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ = TFTransfoXLModel.from_pretrained(UpperCAmelCase__) self.assertIsNotNone(UpperCAmelCase__) @unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''') def SCREAMING_SNAKE_CASE ( self : str) ->str: '''simple docstring''' pass @require_tf class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' @unittest.skip('''Skip test until #12651 is resolved.''') @slow def SCREAMING_SNAKE_CASE ( self : Dict) ->List[Any]: '''simple docstring''' A__ = TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''') # fmt: off A__ = tf.convert_to_tensor([[33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0]] , dtype=tf.intaa) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . # Rasputin has a vision and denounces one of the men as a horse thief . Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . <eod> </s> <eos> # fmt: off A__ = [33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0,33,1,1_857,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,28,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,0] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( # except for Alexei and Maria ) are discovered. The voice of young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. # 1883 Western Siberia, a young Grigori Rasputin is asked by his father # and a group of men to perform magic. Rasputin has a vision and # denounces one of the men as a horse thief. Although his father initially # slaps him for making such an accusation, Rasputin watches as the man # is chased outside and beaten. Twenty years later, Rasputin sees a vision # of the Virgin Mary, prompting him to become a priest. # Rasputin quickly becomes famous, with people, even a bishop, begging for # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar # Nicholas II and his family were discovered. The voice of <unk> young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos> A__ = model.generate(UpperCAmelCase__ , max_length=200 , do_sample=UpperCAmelCase__) self.assertListEqual(output_ids[0].numpy().tolist() , UpperCAmelCase__)
87
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' def __init__( self : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int=13 , UpperCAmelCase__ : Union[str, Any]=3 , UpperCAmelCase__ : str=224 , UpperCAmelCase__ : str=30 , UpperCAmelCase__ : Tuple=400 , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Union[str, Any]=[0.5, 0.5, 0.5] , UpperCAmelCase__ : Tuple=[0.5, 0.5, 0.5] , ) ->str: '''simple docstring''' A__ = size if size is not None else {'''height''': 18, '''width''': 18} A__ = parent A__ = batch_size A__ = num_channels A__ = image_size A__ = min_resolution A__ = max_resolution A__ = do_resize A__ = size A__ = do_normalize A__ = image_mean A__ = image_std def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[int]: '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class UpperCamelCase_ ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = ViTImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE ( self : List[str]) ->str: '''simple docstring''' A__ = EfficientFormerImageProcessorTester(self) @property def SCREAMING_SNAKE_CASE ( self : Dict) ->int: '''simple docstring''' return self.image_proc_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(UpperCAmelCase__ , '''image_mean''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''image_std''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''do_normalize''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''do_resize''')) self.assertTrue(hasattr(UpperCAmelCase__ , '''size''')) def SCREAMING_SNAKE_CASE ( self : List[str]) ->Dict: '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self : str) ->Optional[Any]: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) # create random PIL images A__ = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCAmelCase__) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , Image.Image) # Test not batched input A__ = image_processor(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , ) # Test batched A__ = image_processor(UpperCAmelCase__ , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , ) def SCREAMING_SNAKE_CASE ( self : Tuple) ->List[Any]: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors A__ = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , np.ndarray) # Test not batched input A__ = image_processor(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , ) # Test batched A__ = image_processor(UpperCAmelCase__ , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , ) def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[Any]: '''simple docstring''' A__ = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors A__ = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , torch.Tensor) # Test not batched input A__ = image_processor(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , ) # Test batched A__ = image_processor(UpperCAmelCase__ , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , )
87
1
import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int]) ->Optional[Any]: '''simple docstring''' A__ = dataset A__ = process A__ = params def __len__( self : List[Any]) ->Any: '''simple docstring''' return len(self.dataset) def __getitem__( self : List[str] , UpperCAmelCase__ : int) ->List[str]: '''simple docstring''' A__ = self.dataset[i] A__ = self.process(UpperCAmelCase__ , **self.params) return processed class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict=None) ->List[str]: '''simple docstring''' A__ = loader A__ = infer A__ = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether A__ = None A__ = loader_batch_size # Internal bookkeeping A__ = None A__ = None def __len__( self : int) ->Tuple: '''simple docstring''' return len(self.loader) def __iter__( self : int) ->List[str]: '''simple docstring''' A__ = iter(self.loader) return self def SCREAMING_SNAKE_CASE ( self : List[Any]) ->int: '''simple docstring''' if isinstance(self._loader_batch_data , torch.Tensor): # Batch data is simple tensor, just fetch the slice A__ = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) A__ = {} for k, element in self._loader_batch_data.items(): if isinstance(UpperCAmelCase__ , UpperCAmelCase__): # Convert ModelOutput to tuple first A__ = element.to_tuple() if isinstance(element[0] , torch.Tensor): A__ = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element) elif isinstance(element[0] , np.ndarray): A__ = tuple(np.expand_dims(el[self._loader_batch_index] , 0) for el in element) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(UpperCAmelCase__ , UpperCAmelCase__): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor): A__ = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element) elif isinstance(element[0] , np.ndarray): A__ = tuple(np.expand_dims(el[self._loader_batch_index] , 0) for el in element) continue if element is None: # This can happen for optional data that get passed around A__ = None elif isinstance(element[self._loader_batch_index] , torch.Tensor): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers A__ = element[self._loader_batch_index].unsqueeze(0) elif isinstance(element[self._loader_batch_index] , np.ndarray): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers A__ = np.expand_dims(element[self._loader_batch_index] , 0) else: # This is typically a list, so no need to `unsqueeze`. A__ = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 A__ = self._loader_batch_data.__class__(UpperCAmelCase__) self._loader_batch_index += 1 return result def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[int]: '''simple docstring''' if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch A__ = next(self.iterator) A__ = self.infer(UpperCAmelCase__ , **self.params) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(UpperCAmelCase__ , torch.Tensor): A__ = processed else: A__ = list(processed.keys())[0] A__ = processed[key] if isinstance(UpperCAmelCase__ , UpperCAmelCase__): A__ = len(UpperCAmelCase__) else: A__ = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. A__ = observed_batch_size # Setting internal index to unwrap the batch A__ = processed A__ = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int]=None) ->Optional[Any]: '''simple docstring''' super().__init__(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__) def __iter__( self : Any) ->int: '''simple docstring''' A__ = iter(self.loader) A__ = None return self def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Dict: '''simple docstring''' if self.subiterator is None: A__ = self.infer(next(self.iterator) , **self.params) try: # Try to return next item A__ = next(self.subiterator) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. # # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators A__ = self.infer(next(self.iterator) , **self.params) A__ = next(self.subiterator) return processed class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' def __iter__( self : List[str]) ->List[str]: '''simple docstring''' A__ = iter(self.loader) return self def SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[int]: '''simple docstring''' A__ = False A__ = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: A__ = self.loader_batch_item() A__ = item.pop('''is_last''') accumulator.append(UpperCAmelCase__) if is_last: return accumulator while not is_last: A__ = self.infer(next(self.iterator) , **self.params) if self.loader_batch_size is not None: if isinstance(UpperCAmelCase__ , torch.Tensor): A__ = processed else: A__ = list(processed.keys())[0] A__ = processed[key] if isinstance(UpperCAmelCase__ , UpperCAmelCase__): A__ = len(UpperCAmelCase__) else: A__ = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. A__ = observed_batch_size A__ = processed A__ = 0 while self._loader_batch_index < self.loader_batch_size: A__ = self.loader_batch_item() A__ = item.pop('''is_last''') accumulator.append(UpperCAmelCase__) if is_last: return accumulator else: A__ = processed A__ = item.pop('''is_last''') accumulator.append(UpperCAmelCase__) return accumulator class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : Tuple , UpperCAmelCase__ : Dataset , UpperCAmelCase__ : str) ->str: '''simple docstring''' A__ = dataset A__ = key def __len__( self : Dict) ->Optional[int]: '''simple docstring''' return len(self.dataset) def __getitem__( self : int , UpperCAmelCase__ : int) ->Any: '''simple docstring''' return self.dataset[i][self.key] class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : Optional[Any] , UpperCAmelCase__ : Dataset , UpperCAmelCase__ : str , UpperCAmelCase__ : str) ->Optional[int]: '''simple docstring''' A__ = dataset A__ = keya A__ = keya def __len__( self : List[Any]) ->Optional[Any]: '''simple docstring''' return len(self.dataset) def __getitem__( self : str , UpperCAmelCase__ : int) ->List[str]: '''simple docstring''' return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
87
from math import atan, cos, radians, sin, tan from .haversine_distance import haversine_distance _lowerCamelCase : Dict = 6_378_137.0 _lowerCamelCase : Union[str, Any] = 6_356_752.314_245 _lowerCamelCase : List[Any] = 6378137 def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> float: """simple docstring""" A__ = (AXIS_A - AXIS_B) / AXIS_A # Parametric latitudes # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude A__ = atan((1 - flattening) * tan(radians(lowercase_ ) ) ) A__ = atan((1 - flattening) * tan(radians(lowercase_ ) ) ) # Compute central angle between two points # using haversine theta. sigma = haversine_distance / equatorial radius A__ = haversine_distance(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) / EQUATORIAL_RADIUS # Intermediate P and Q values A__ = (b_lata + b_lata) / 2 A__ = (b_lata - b_lata) / 2 # Intermediate X value # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2) A__ = (sin(lowercase_ ) ** 2) * (cos(lowercase_ ) ** 2) A__ = cos(sigma / 2 ) ** 2 A__ = (sigma - sin(lowercase_ )) * (x_numerator / x_demonimator) # Intermediate Y value # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2) A__ = (cos(lowercase_ ) ** 2) * (sin(lowercase_ ) ** 2) A__ = sin(sigma / 2 ) ** 2 A__ = (sigma + sin(lowercase_ )) * (y_numerator / y_denominator) return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value))) if __name__ == "__main__": import doctest doctest.testmod()
87
1
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''naver-clova-ix/donut-base-finetuned-docvqa''' UpperCAmelCase__ = ( '''This is a tool that answers a question about an document (pdf). It takes an input named `document` which ''' '''should be the document containing the information, as well as a `question` that is the question about the ''' '''document. It returns a text that contains the answer to the question.''' ) UpperCAmelCase__ = '''document_qa''' UpperCAmelCase__ = AutoProcessor UpperCAmelCase__ = VisionEncoderDecoderModel UpperCAmelCase__ = ['''image''', '''text'''] UpperCAmelCase__ = ['''text'''] def __init__( self : int , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : Dict) ->List[str]: '''simple docstring''' if not is_vision_available(): raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''') super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : "Image" , UpperCAmelCase__ : str) ->Optional[int]: '''simple docstring''' A__ = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>''' A__ = task_prompt.replace('''{user_input}''' , UpperCAmelCase__) A__ = self.pre_processor.tokenizer( UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , return_tensors='''pt''').input_ids A__ = self.pre_processor(UpperCAmelCase__ , return_tensors='''pt''').pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : List[str]) ->Any: '''simple docstring''' return self.model.generate( inputs['''pixel_values'''].to(self.device) , decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=UpperCAmelCase__ , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=UpperCAmelCase__ , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=UpperCAmelCase__ , ).sequences def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Optional[int]) ->Dict: '''simple docstring''' A__ = self.pre_processor.batch_decode(UpperCAmelCase__)[0] A__ = sequence.replace(self.pre_processor.tokenizer.eos_token , '''''') A__ = sequence.replace(self.pre_processor.tokenizer.pad_token , '''''') A__ = re.sub(R'''<.*?>''' , '''''' , UpperCAmelCase__ , count=1).strip() # remove first task start token A__ = self.pre_processor.tokenajson(UpperCAmelCase__) return sequence["answer"]
87
import heapq import sys import numpy as np _lowerCamelCase : Any = tuple[int, int] class UpperCamelCase_ : '''simple docstring''' def __init__( self : Any) ->str: '''simple docstring''' A__ = [] A__ = set() def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[str]: '''simple docstring''' if not self.empty(): return self.elements[0][0] else: return float('''inf''') def SCREAMING_SNAKE_CASE ( self : Tuple) ->str: '''simple docstring''' return len(self.elements) == 0 def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any]) ->List[str]: '''simple docstring''' if item not in self.set: heapq.heappush(self.elements , (priority, item)) self.set.add(UpperCAmelCase__) else: # update # print("update", item) A__ = [] ((A__) , (A__)) = heapq.heappop(self.elements) while x != item: temp.append((pri, x)) ((A__) , (A__)) = heapq.heappop(self.elements) temp.append((priority, item)) for pro, xxx in temp: heapq.heappush(self.elements , (pro, xxx)) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : List[Any]) ->Union[str, Any]: '''simple docstring''' if item in self.set: self.set.remove(UpperCAmelCase__) A__ = [] ((A__) , (A__)) = heapq.heappop(self.elements) while x != item: temp.append((pro, x)) ((A__) , (A__)) = heapq.heappop(self.elements) for prito, yyy in temp: heapq.heappush(self.elements , (prito, yyy)) def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]: '''simple docstring''' return self.elements[0][1] def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->int: '''simple docstring''' ((A__) , (A__)) = heapq.heappop(self.elements) self.set.remove(UpperCAmelCase__) return (priority, item) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str: """simple docstring""" A__ = np.array(lowercase_ ) A__ = np.array(lowercase_ ) return np.linalg.norm(a - b ) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str: """simple docstring""" return consistent_heuristic(lowercase_ , lowercase_ ) // t def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Union[str, Any]: """simple docstring""" return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] ) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Optional[int]: """simple docstring""" A__ = g_function[start] + Wa * heuristics[i](lowercase_ , lowercase_ ) return ans def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> str: """simple docstring""" A__ = np.chararray((n, n) ) for i in range(lowercase_ ): for j in range(lowercase_ ): A__ = '''*''' for i in range(lowercase_ ): for j in range(lowercase_ ): if (j, (n - 1) - i) in blocks: A__ = '''#''' A__ = '''-''' A__ = back_pointer[goal] while x != start: ((A__) , (A__)) = x # print(x) A__ = '''-''' A__ = back_pointer[x] A__ = '''-''' for i in range(lowercase_ ): for j in range(lowercase_ ): if (i, j) == (0, n - 1): print(grid[i][j] , end=''' ''' ) print('''<-- End position''' , end=''' ''' ) else: print(grid[i][j] , end=''' ''' ) print() print('''^''' ) print('''Start position''' ) print() print('''# is an obstacle''' ) print('''- is the path taken by algorithm''' ) print('''PATH TAKEN BY THE ALGORITHM IS:-''' ) A__ = back_pointer[goal] while x != start: print(lowercase_ , end=''' ''' ) A__ = back_pointer[x] print(lowercase_ ) sys.exit() def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Dict: """simple docstring""" if p[0] < 0 or p[0] > n - 1: return False if p[1] < 0 or p[1] > n - 1: return False return True def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Union[str, Any]: """simple docstring""" for itera in range(lowercase_ ): open_list[itera].remove_element(lowercase_ ) # print("s", s) # print("j", j) ((A__) , (A__)) = s A__ = (x - 1, y) A__ = (x + 1, y) A__ = (x, y + 1) A__ = (x, y - 1) for neighbours in [left, right, up, down]: if neighbours not in blocks: if valid(lowercase_ ) and neighbours not in visited: # print("neighbour", neighbours) visited.add(lowercase_ ) A__ = -1 A__ = float('''inf''' ) if valid(lowercase_ ) and g_function[neighbours] > g_function[s] + 1: A__ = g_function[s] + 1 A__ = s if neighbours not in close_list_anchor: open_list[0].put(lowercase_ , key(lowercase_ , 0 , lowercase_ , lowercase_ ) ) if neighbours not in close_list_inad: for var in range(1 , lowercase_ ): if key(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) <= Wa * key( lowercase_ , 0 , lowercase_ , lowercase_ ): open_list[j].put( lowercase_ , key(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) ) def SCREAMING_SNAKE_CASE ( ) -> Optional[int]: """simple docstring""" A__ = [] for x in range(1 , 5 ): for y in range(1 , 6 ): some_list.append((x, y) ) for x in range(15 , 20 ): some_list.append((x, 17) ) for x in range(10 , 19 ): for y in range(1 , 15 ): some_list.append((x, y) ) # L block for x in range(1 , 4 ): for y in range(12 , 19 ): some_list.append((x, y) ) for x in range(3 , 13 ): for y in range(16 , 19 ): some_list.append((x, y) ) return some_list _lowerCamelCase : Dict = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a} _lowerCamelCase : Optional[Any] = [ (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1), ] _lowerCamelCase : Optional[int] = make_common_ground() _lowerCamelCase : Optional[Any] = blocks_blk # hyper parameters _lowerCamelCase : Optional[int] = 1 _lowerCamelCase : Optional[int] = 1 _lowerCamelCase : List[Any] = 20 _lowerCamelCase : Any = 3 # one consistent and two other inconsistent # start and end destination _lowerCamelCase : str = (0, 0) _lowerCamelCase : Tuple = (n - 1, n - 1) _lowerCamelCase : int = 1 def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> str: """simple docstring""" A__ = {start: 0, goal: float('''inf''' )} A__ = {start: -1, goal: -1} A__ = [] A__ = set() for i in range(lowercase_ ): open_list.append(PriorityQueue() ) open_list[i].put(lowercase_ , key(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) ) A__ = [] A__ = [] while open_list[0].minkey() < float('''inf''' ): for i in range(1 , lowercase_ ): # print(open_list[0].minkey(), open_list[i].minkey()) if open_list[i].minkey() <= Wa * open_list[0].minkey(): global t t += 1 if g_function[goal] <= open_list[i].minkey(): if g_function[goal] < float('''inf''' ): do_something(lowercase_ , lowercase_ , lowercase_ ) else: A__ , A__ = open_list[i].top_show() visited.add(lowercase_ ) expand_state( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) close_list_inad.append(lowercase_ ) else: if g_function[goal] <= open_list[0].minkey(): if g_function[goal] < float('''inf''' ): do_something(lowercase_ , lowercase_ , lowercase_ ) else: A__ = open_list[0].top_show() visited.add(lowercase_ ) expand_state( lowercase_ , 0 , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) close_list_anchor.append(lowercase_ ) print('''No path found to goal''' ) print() for i in range(n - 1 , -1 , -1 ): for j in range(lowercase_ ): if (j, i) in blocks: print('''#''' , end=''' ''' ) elif (j, i) in back_pointer: if (j, i) == (n - 1, n - 1): print('''*''' , end=''' ''' ) else: print('''-''' , end=''' ''' ) else: print('''*''' , end=''' ''' ) if (j, i) == (n - 1, n - 1): print('''<-- End position''' , end=''' ''' ) print() print('''^''' ) print('''Start position''' ) print() print('''# is an obstacle''' ) print('''- is the path taken by algorithm''' ) if __name__ == "__main__": multi_a_star(start, goal, n_heuristic)
87
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available _lowerCamelCase : Any = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Union[str, Any] = ["""BartphoTokenizer"""] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bartpho import BartphoTokenizer else: import sys _lowerCamelCase : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
87
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.utils import ComputeEnvironment from .cluster import get_cluster_input from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401 from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401 from .sagemaker import get_sagemaker_input _lowerCamelCase : Optional[Any] = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine""" def SCREAMING_SNAKE_CASE ( ) -> Dict: """simple docstring""" A__ = _ask_options( '''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , ) if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: A__ = get_sagemaker_input() else: A__ = get_cluster_input() return config def SCREAMING_SNAKE_CASE ( lowercase_=None ) -> List[Any]: """simple docstring""" if subparsers is not None: A__ = subparsers.add_parser('''config''' , description=lowercase_ ) else: A__ = argparse.ArgumentParser('''Accelerate config command''' , description=lowercase_ ) parser.add_argument( '''--config_file''' , default=lowercase_ , help=( '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache ''' '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ''' '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ''' '''with \'huggingface\'.''' ) , ) if subparsers is not None: parser.set_defaults(func=lowercase_ ) return parser def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Any: """simple docstring""" A__ = get_user_input() if args.config_file is not None: A__ = args.config_file else: if not os.path.isdir(lowercase_ ): os.makedirs(lowercase_ ) A__ = default_yaml_config_file if config_file.endswith('''.json''' ): config.to_json_file(lowercase_ ) else: config.to_yaml_file(lowercase_ ) print(f"""accelerate configuration saved at {config_file}""" ) def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]: """simple docstring""" A__ = config_command_parser() A__ = parser.parse_args() config_command(lowercase_ ) if __name__ == "__main__": main()
87
1
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING _lowerCamelCase : str = logging.get_logger(__name__) _lowerCamelCase : Dict = { """microsoft/table-transformer-detection""": ( """https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json""" ), } class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''table-transformer''' UpperCAmelCase__ = ['''past_key_values'''] UpperCAmelCase__ = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', } def __init__( self : Optional[Any] , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : List[Any]=3 , UpperCAmelCase__ : Any=100 , UpperCAmelCase__ : int=6 , UpperCAmelCase__ : List[str]=2_048 , UpperCAmelCase__ : List[Any]=8 , UpperCAmelCase__ : Dict=6 , UpperCAmelCase__ : Optional[int]=2_048 , UpperCAmelCase__ : str=8 , UpperCAmelCase__ : str=0.0 , UpperCAmelCase__ : List[str]=0.0 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : str="relu" , UpperCAmelCase__ : Tuple=256 , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : int=0.0 , UpperCAmelCase__ : List[str]=0.0 , UpperCAmelCase__ : str=0.02 , UpperCAmelCase__ : List[Any]=1.0 , UpperCAmelCase__ : Any=False , UpperCAmelCase__ : Optional[int]="sine" , UpperCAmelCase__ : Tuple="resnet50" , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : str=False , UpperCAmelCase__ : Optional[int]=1 , UpperCAmelCase__ : List[str]=5 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : Tuple=1 , UpperCAmelCase__ : List[str]=1 , UpperCAmelCase__ : str=5 , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : Optional[Any]=0.1 , **UpperCAmelCase__ : Any , ) ->List[str]: '''simple docstring''' if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''') if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''') A__ = CONFIG_MAPPING['''resnet'''](out_features=['''stage4''']) elif isinstance(UpperCAmelCase__ , UpperCAmelCase__): A__ = backbone_config.get('''model_type''') A__ = CONFIG_MAPPING[backbone_model_type] A__ = config_class.from_dict(UpperCAmelCase__) # set timm attributes to None A__ , A__ , A__ = None, None, None A__ = use_timm_backbone A__ = backbone_config A__ = num_channels A__ = num_queries A__ = d_model A__ = encoder_ffn_dim A__ = encoder_layers A__ = encoder_attention_heads A__ = decoder_ffn_dim A__ = decoder_layers A__ = decoder_attention_heads A__ = dropout A__ = attention_dropout A__ = activation_dropout A__ = activation_function A__ = init_std A__ = init_xavier_std A__ = encoder_layerdrop A__ = decoder_layerdrop A__ = encoder_layers A__ = auxiliary_loss A__ = position_embedding_type A__ = backbone A__ = use_pretrained_backbone A__ = dilation # Hungarian matcher A__ = class_cost A__ = bbox_cost A__ = giou_cost # Loss coefficients A__ = mask_loss_coefficient A__ = dice_loss_coefficient A__ = bbox_loss_coefficient A__ = giou_loss_coefficient A__ = eos_coefficient super().__init__(is_encoder_decoder=UpperCAmelCase__ , **UpperCAmelCase__) @property def SCREAMING_SNAKE_CASE ( self : Any) ->int: '''simple docstring''' return self.encoder_attention_heads @property def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->int: '''simple docstring''' return self.d_model class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = version.parse('''1.11''' ) @property def SCREAMING_SNAKE_CASE ( self : int) ->Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ('''pixel_mask''', {0: '''batch'''}), ]) @property def SCREAMING_SNAKE_CASE ( self : List[str]) ->float: '''simple docstring''' return 1e-5 @property def SCREAMING_SNAKE_CASE ( self : List[Any]) ->int: '''simple docstring''' return 12
87
import argparse import numpy as np import torch from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging logging.set_verbosity_info() _lowerCamelCase : int = logging.get_logger("""transformers.models.speecht5""") def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Tuple: """simple docstring""" hf_model.apply_weight_norm() A__ = checkpoint['''input_conv.weight_g'''] A__ = checkpoint['''input_conv.weight_v'''] A__ = checkpoint['''input_conv.bias'''] for i in range(len(config.upsample_rates ) ): A__ = checkpoint[f"""upsamples.{i}.1.weight_g"""] A__ = checkpoint[f"""upsamples.{i}.1.weight_v"""] A__ = checkpoint[f"""upsamples.{i}.1.bias"""] for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ): for j in range(len(config.resblock_dilation_sizes ) ): A__ = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_g"""] A__ = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_v"""] A__ = checkpoint[f"""blocks.{i}.convs1.{j}.1.bias"""] A__ = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_g"""] A__ = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_v"""] A__ = checkpoint[f"""blocks.{i}.convs2.{j}.1.bias"""] A__ = checkpoint['''output_conv.1.weight_g'''] A__ = checkpoint['''output_conv.1.weight_v'''] A__ = checkpoint['''output_conv.1.bias'''] hf_model.remove_weight_norm() @torch.no_grad() def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_=None , lowercase_=None , ) -> str: """simple docstring""" if config_path is not None: A__ = SpeechTaHifiGanConfig.from_pretrained(lowercase_ ) else: A__ = SpeechTaHifiGanConfig() A__ = SpeechTaHifiGan(lowercase_ ) A__ = torch.load(lowercase_ ) load_weights(orig_checkpoint['''model''']['''generator'''] , lowercase_ , lowercase_ ) A__ = np.load(lowercase_ ) A__ = stats[0].reshape(-1 ) A__ = stats[1].reshape(-1 ) A__ = torch.from_numpy(lowercase_ ).float() A__ = torch.from_numpy(lowercase_ ).float() model.save_pretrained(lowercase_ ) if repo_id: print('''Pushing to the hub...''' ) model.push_to_hub(lowercase_ ) if __name__ == "__main__": _lowerCamelCase : Any = argparse.ArgumentParser() parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""") parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub.""" ) _lowerCamelCase : List[str] = parser.parse_args() convert_hifigan_checkpoint( args.checkpoint_path, args.stats_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
87
1
from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCamelCase : List[Any] = logging.get_logger(__name__) _lowerCamelCase : Optional[int] = { """MIT/ast-finetuned-audioset-10-10-0.4593""": ( """https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json""" ), } class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''audio-spectrogram-transformer''' def __init__( self : Tuple , UpperCAmelCase__ : Dict=768 , UpperCAmelCase__ : List[str]=12 , UpperCAmelCase__ : Any=12 , UpperCAmelCase__ : int=3_072 , UpperCAmelCase__ : List[Any]="gelu" , UpperCAmelCase__ : Union[str, Any]=0.0 , UpperCAmelCase__ : str=0.0 , UpperCAmelCase__ : Dict=0.02 , UpperCAmelCase__ : Optional[int]=1e-12 , UpperCAmelCase__ : Optional[int]=16 , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Union[str, Any]=10 , UpperCAmelCase__ : Any=10 , UpperCAmelCase__ : Optional[Any]=1_024 , UpperCAmelCase__ : List[str]=128 , **UpperCAmelCase__ : Optional[Any] , ) ->Optional[Any]: '''simple docstring''' super().__init__(**UpperCAmelCase__) A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = initializer_range A__ = layer_norm_eps A__ = patch_size A__ = qkv_bias A__ = frequency_stride A__ = time_stride A__ = max_length A__ = num_mel_bins
87
import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class UpperCamelCase_ : '''simple docstring''' def __init__( self : Tuple , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict=13 , UpperCAmelCase__ : Dict=7 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : str=99 , UpperCAmelCase__ : Union[str, Any]=32 , UpperCAmelCase__ : Tuple=5 , UpperCAmelCase__ : Union[str, Any]=4 , UpperCAmelCase__ : List[Any]=37 , UpperCAmelCase__ : Union[str, Any]="gelu" , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : Tuple=50 , UpperCAmelCase__ : Optional[int]=0.02 , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : List[str]=None , ) ->Union[str, Any]: '''simple docstring''' A__ = parent A__ = batch_size A__ = seq_length A__ = is_training A__ = use_input_mask A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = initializer_range A__ = use_labels A__ = scope def SCREAMING_SNAKE_CASE ( self : int) ->Any: '''simple docstring''' A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) A__ = None if self.use_input_mask: A__ = random_attention_mask([self.batch_size, self.seq_length]) if self.use_labels: A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) A__ = self.get_config() return config, input_ids, input_mask, token_labels def SCREAMING_SNAKE_CASE ( self : int) ->int: '''simple docstring''' return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE ( self : List[str]) ->Union[str, Any]: '''simple docstring''' ( ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ) = self.prepare_config_and_inputs() A__ = True A__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) A__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : List[Any] , ) ->Dict: '''simple docstring''' A__ = BertGenerationEncoder(config=UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() A__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__) A__ = model(UpperCAmelCase__) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , **UpperCAmelCase__ : Optional[Any] , ) ->Dict: '''simple docstring''' A__ = True A__ = BertGenerationEncoder(config=UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() A__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , ) A__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict , **UpperCAmelCase__ : Optional[int] , ) ->Any: '''simple docstring''' A__ = True A__ = True A__ = BertGenerationDecoder(config=UpperCAmelCase__).to(UpperCAmelCase__).eval() # first forward pass A__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ , ) A__ = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A__ = ids_tensor((self.batch_size, 3) , config.vocab_size) A__ = ids_tensor((self.batch_size, 3) , vocab_size=2) # append to next input_ids and A__ = torch.cat([input_ids, next_tokens] , dim=-1) A__ = torch.cat([input_mask, next_mask] , dim=-1) A__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )['''hidden_states'''][0] A__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )['''hidden_states'''][0] # select random slice A__ = ids_tensor((1,) , output_from_past.shape[-1]).item() A__ = output_from_no_past[:, -3:, random_slice_idx].detach() A__ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3)) def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] , *UpperCAmelCase__ : List[str] , ) ->List[Any]: '''simple docstring''' A__ = BertGenerationDecoder(UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() A__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[str]: '''simple docstring''' A__ , A__ , A__ , A__ = self.prepare_config_and_inputs() A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () UpperCAmelCase__ = (BertGenerationDecoder,) if is_torch_available() else () UpperCAmelCase__ = ( {'''feature-extraction''': BertGenerationEncoder, '''text-generation''': BertGenerationDecoder} if is_torch_available() else {} ) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Dict: '''simple docstring''' A__ = BertGenerationEncoderTester(self) A__ = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[str]: '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : List[Any]) ->int: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[Any]: '''simple docstring''' A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs() A__ = '''bert''' self.model_tester.create_and_check_model(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Dict) ->Union[str, Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any: '''simple docstring''' ( ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() A__ = None self.model_tester.create_and_check_model_as_decoder( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , ) def SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*UpperCAmelCase__) @slow def SCREAMING_SNAKE_CASE ( self : Dict) ->List[Any]: '''simple docstring''' A__ = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''') self.assertIsNotNone(UpperCAmelCase__) @require_torch class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE ( self : Any) ->Union[str, Any]: '''simple docstring''' A__ = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''') A__ = torch.tensor([[101, 7_592, 1_010, 2_026, 3_899, 2_003, 10_140, 102]]) with torch.no_grad(): A__ = model(UpperCAmelCase__)[0] A__ = torch.Size([1, 8, 1_024]) self.assertEqual(output.shape , UpperCAmelCase__) A__ = torch.tensor( [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1e-4)) @require_torch class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]: '''simple docstring''' A__ = BertGenerationDecoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''') A__ = torch.tensor([[101, 7_592, 1_010, 2_026, 3_899, 2_003, 10_140, 102]]) with torch.no_grad(): A__ = model(UpperCAmelCase__)[0] A__ = torch.Size([1, 8, 50_358]) self.assertEqual(output.shape , UpperCAmelCase__) A__ = torch.tensor( [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1e-4))
87
1
from collections.abc import Generator from math import sin def SCREAMING_SNAKE_CASE ( lowercase_ ) -> bytes: """simple docstring""" if len(lowercase_ ) != 32: raise ValueError('''Input must be of length 32''' ) A__ = b'''''' for i in [3, 2, 1, 0]: little_endian += string_aa[8 * i : 8 * i + 8] return little_endian def SCREAMING_SNAKE_CASE ( lowercase_ ) -> bytes: """simple docstring""" if i < 0: raise ValueError('''Input must be non-negative''' ) A__ = format(lowercase_ , '''08x''' )[-8:] A__ = b'''''' for i in [3, 2, 1, 0]: little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('''utf-8''' ) return little_endian_hex def SCREAMING_SNAKE_CASE ( lowercase_ ) -> bytes: """simple docstring""" A__ = b'''''' for char in message: bit_string += format(lowercase_ , '''08b''' ).encode('''utf-8''' ) A__ = format(len(lowercase_ ) , '''064b''' ).encode('''utf-8''' ) # Pad bit_string to a multiple of 512 chars bit_string += b"1" while len(lowercase_ ) % 512 != 448: bit_string += b"0" bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] ) return bit_string def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Generator[list[int], None, None]: """simple docstring""" if len(lowercase_ ) % 512 != 0: raise ValueError('''Input must have length that\'s a multiple of 512''' ) for pos in range(0 , len(lowercase_ ) , 512 ): A__ = bit_string[pos : pos + 512] A__ = [] for i in range(0 , 512 , 32 ): block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) ) yield block_words def SCREAMING_SNAKE_CASE ( lowercase_ ) -> int: """simple docstring""" if i < 0: raise ValueError('''Input must be non-negative''' ) A__ = format(lowercase_ , '''032b''' ) A__ = '''''' for c in i_str: new_str += "1" if c == "0" else "0" return int(lowercase_ , 2 ) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> int: """simple docstring""" return (a + b) % 2**32 def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> int: """simple docstring""" if i < 0: raise ValueError('''Input must be non-negative''' ) if shift < 0: raise ValueError('''Shift must be non-negative''' ) return ((i << shift) ^ (i >> (32 - shift))) % 2**32 def SCREAMING_SNAKE_CASE ( lowercase_ ) -> bytes: """simple docstring""" A__ = preprocess(lowercase_ ) A__ = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )] # Starting states A__ = 0x67_452_301 A__ = 0xEF_CDA_B89 A__ = 0x98_BAD_CFE A__ = 0x10_325_476 A__ = [ 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, ] # Process bit string in chunks, each with 16 32-char words for block_words in get_block_words(lowercase_ ): A__ = aa A__ = ba A__ = ca A__ = da # Hash current chunk for i in range(64 ): if i <= 15: # f = (b & c) | (not_32(b) & d) # Alternate definition for f A__ = d ^ (b & (c ^ d)) A__ = i elif i <= 31: # f = (d & b) | (not_32(d) & c) # Alternate definition for f A__ = c ^ (d & (b ^ c)) A__ = (5 * i + 1) % 16 elif i <= 47: A__ = b ^ c ^ d A__ = (3 * i + 5) % 16 else: A__ = c ^ (b | not_aa(lowercase_ )) A__ = (7 * i) % 16 A__ = (f + a + added_consts[i] + block_words[g]) % 2**32 A__ = d A__ = c A__ = b A__ = sum_aa(lowercase_ , left_rotate_aa(lowercase_ , shift_amounts[i] ) ) # Add hashed chunk to running total A__ = sum_aa(lowercase_ , lowercase_ ) A__ = sum_aa(lowercase_ , lowercase_ ) A__ = sum_aa(lowercase_ , lowercase_ ) A__ = sum_aa(lowercase_ , lowercase_ ) A__ = reformat_hex(lowercase_ ) + reformat_hex(lowercase_ ) + reformat_hex(lowercase_ ) + reformat_hex(lowercase_ ) return digest if __name__ == "__main__": import doctest doctest.testmod()
87
import argparse import json import os import time import zipfile from get_ci_error_statistics import download_artifact, get_artifacts_links from transformers import logging _lowerCamelCase : int = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Dict: """simple docstring""" A__ = set() A__ = [] def parse_line(lowercase_ ): for line in fp: if isinstance(lowercase_ , lowercase_ ): A__ = line.decode('''UTF-8''' ) if "warnings summary (final)" in line: continue # This means we are outside the body of a warning elif not line.startswith(''' ''' ): # process a single warning and move it to `selected_warnings`. if len(lowercase_ ) > 0: A__ = '''\n'''.join(lowercase_ ) # Only keep the warnings specified in `targets` if any(f""": {x}: """ in warning for x in targets ): selected_warnings.add(lowercase_ ) buffer.clear() continue else: A__ = line.strip() buffer.append(lowercase_ ) if from_gh: for filename in os.listdir(lowercase_ ): A__ = os.path.join(lowercase_ , lowercase_ ) if not os.path.isdir(lowercase_ ): # read the file if filename != "warnings.txt": continue with open(lowercase_ ) as fp: parse_line(lowercase_ ) else: try: with zipfile.ZipFile(lowercase_ ) as z: for filename in z.namelist(): if not os.path.isdir(lowercase_ ): # read the file if filename != "warnings.txt": continue with z.open(lowercase_ ) as fp: parse_line(lowercase_ ) except Exception: logger.warning( f"""{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.""" ) return selected_warnings def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str: """simple docstring""" A__ = set() A__ = [os.path.join(lowercase_ , lowercase_ ) for p in os.listdir(lowercase_ ) if (p.endswith('''.zip''' ) or from_gh)] for p in paths: selected_warnings.update(extract_warnings_from_single_artifact(lowercase_ , lowercase_ ) ) return selected_warnings if __name__ == "__main__": def SCREAMING_SNAKE_CASE ( lowercase_ ) -> int: """simple docstring""" return values.split(''',''' ) _lowerCamelCase : int = argparse.ArgumentParser() # Required parameters parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""") parser.add_argument( """--output_dir""", type=str, required=True, help="""Where to store the downloaded artifacts and other result files.""", ) parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""") # optional parameters parser.add_argument( """--targets""", default="""DeprecationWarning,UserWarning,FutureWarning""", type=list_str, help="""Comma-separated list of target warning(s) which we want to extract.""", ) parser.add_argument( """--from_gh""", action="""store_true""", help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""", ) _lowerCamelCase : List[Any] = parser.parse_args() _lowerCamelCase : List[str] = args.from_gh if from_gh: # The artifacts have to be downloaded using `actions/download-artifact@v3` pass else: os.makedirs(args.output_dir, exist_ok=True) # get download links _lowerCamelCase : Any = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) # download artifacts for idx, (name, url) in enumerate(artifacts.items()): print(name) print(url) print("""=""" * 80) download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) # extract warnings from artifacts _lowerCamelCase : Any = extract_warnings(args.output_dir, args.targets) _lowerCamelCase : Optional[Any] = sorted(selected_warnings) with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
87
1
import copy from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto.configuration_auto import AutoConfig if TYPE_CHECKING: from ... import PreTrainedTokenizerBase, TensorType _lowerCamelCase : Tuple = logging.get_logger(__name__) class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''vision-encoder-decoder''' UpperCAmelCase__ = True def __init__( self : Tuple , **UpperCAmelCase__ : List[str]) ->Optional[Any]: '''simple docstring''' super().__init__(**UpperCAmelCase__) if "encoder" not in kwargs or "decoder" not in kwargs: raise ValueError( f"""A configuraton of type {self.model_type} cannot be instantiated because """ f"""not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}""") A__ = kwargs.pop('''encoder''') A__ = encoder_config.pop('''model_type''') A__ = kwargs.pop('''decoder''') A__ = decoder_config.pop('''model_type''') A__ = AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__) A__ = AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__) A__ = True @classmethod def SCREAMING_SNAKE_CASE ( cls : Tuple , UpperCAmelCase__ : PretrainedConfig , UpperCAmelCase__ : PretrainedConfig , **UpperCAmelCase__ : List[Any]) ->PretrainedConfig: '''simple docstring''' logger.info('''Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''') A__ = True A__ = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[str]: '''simple docstring''' A__ = copy.deepcopy(self.__dict__) A__ = self.encoder.to_dict() A__ = self.decoder.to_dict() A__ = self.__class__.model_type return output class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = version.parse('''1.11''' ) @property def SCREAMING_SNAKE_CASE ( self : Dict) ->Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ]) @property def SCREAMING_SNAKE_CASE ( self : List[Any]) ->float: '''simple docstring''' return 1e-4 @property def SCREAMING_SNAKE_CASE ( self : List[str]) ->Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict({'''last_hidden_state''': {0: '''batch''', 1: '''encoder_sequence'''}}) class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' @property def SCREAMING_SNAKE_CASE ( self : Tuple) ->Mapping[str, Mapping[int, str]]: '''simple docstring''' A__ = OrderedDict() A__ = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} A__ = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} A__ = {0: '''batch''', 1: '''encoder_sequence'''} return common_inputs def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : "PreTrainedTokenizerBase" , UpperCAmelCase__ : int = -1 , UpperCAmelCase__ : int = -1 , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : Optional["TensorType"] = None , ) ->Mapping[str, Any]: '''simple docstring''' import torch A__ = OrderedDict() A__ = super().generate_dummy_inputs( UpperCAmelCase__ , batch_size=UpperCAmelCase__ , seq_length=UpperCAmelCase__ , is_pair=UpperCAmelCase__ , framework=UpperCAmelCase__) A__ , A__ = dummy_input['''input_ids'''].shape A__ = (batch, encoder_sequence, self._config.encoder_hidden_size) A__ = dummy_input.pop('''input_ids''') A__ = dummy_input.pop('''attention_mask''') A__ = torch.zeros(UpperCAmelCase__) return common_inputs class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' @property def SCREAMING_SNAKE_CASE ( self : List[Any]) ->None: '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : PretrainedConfig) ->OnnxConfig: '''simple docstring''' return VisionEncoderDecoderEncoderOnnxConfig(UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : PretrainedConfig , UpperCAmelCase__ : PretrainedConfig , UpperCAmelCase__ : str = "default") ->OnnxConfig: '''simple docstring''' A__ = encoder_config.hidden_size return VisionEncoderDecoderDecoderOnnxConfig(UpperCAmelCase__ , UpperCAmelCase__)
87
class UpperCamelCase_ : # Public class to implement a graph '''simple docstring''' def __init__( self : str , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : list[list[bool]]) ->None: '''simple docstring''' A__ = row A__ = col A__ = graph def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : list[list[bool]]) ->bool: '''simple docstring''' return ( 0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and self.graph[i][j] ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : list[list[bool]]) ->None: '''simple docstring''' A__ = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order A__ = [-1, 0, 1, -1, 1, -1, 0, 1] A__ = True # Make those cells visited for k in range(8): if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , UpperCAmelCase__): self.diffs(i + row_nbr[k] , j + col_nbr[k] , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->int: # And finally, count all islands. '''simple docstring''' A__ = [[False for j in range(self.COL)] for i in range(self.ROW)] A__ = 0 for i in range(self.ROW): for j in range(self.COL): if visited[i][j] is False and self.graph[i][j] == 1: self.diffs(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__) count += 1 return count
87
1
_lowerCamelCase : int = { """meter""": """m""", """kilometer""": """km""", """megametre""": """Mm""", """gigametre""": """Gm""", """terametre""": """Tm""", """petametre""": """Pm""", """exametre""": """Em""", """zettametre""": """Zm""", """yottametre""": """Ym""", } # Exponent of the factor(meter) _lowerCamelCase : Union[str, Any] = { """m""": 0, """km""": 3, """Mm""": 6, """Gm""": 9, """Tm""": 12, """Pm""": 15, """Em""": 18, """Zm""": 21, """Ym""": 24, } def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> float: """simple docstring""" A__ = from_type.lower().strip('''s''' ) A__ = to_type.lower().strip('''s''' ) A__ = UNIT_SYMBOL.get(lowercase_ , lowercase_ ) A__ = UNIT_SYMBOL.get(lowercase_ , lowercase_ ) if from_sanitized not in METRIC_CONVERSION: A__ = ( f"""Invalid 'from_type' value: {from_type!r}.\n""" f"""Conversion abbreviations are: {", ".join(lowercase_ )}""" ) raise ValueError(lowercase_ ) if to_sanitized not in METRIC_CONVERSION: A__ = ( f"""Invalid 'to_type' value: {to_type!r}.\n""" f"""Conversion abbreviations are: {", ".join(lowercase_ )}""" ) raise ValueError(lowercase_ ) A__ = METRIC_CONVERSION[from_sanitized] A__ = METRIC_CONVERSION[to_sanitized] A__ = 1 if from_exponent > to_exponent: A__ = from_exponent - to_exponent else: A__ = -(to_exponent - from_exponent) return value * pow(10 , lowercase_ ) if __name__ == "__main__": from doctest import testmod testmod()
87
from __future__ import annotations import requests _lowerCamelCase : str = set( """approved_at_utc approved_by author_flair_background_color author_flair_css_class author_flair_richtext author_flair_template_id author_fullname author_premium can_mod_post category clicked content_categories created_utc downs edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta is_original_content is_reddit_media_domain is_video link_flair_css_class link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title name permalink pwls quarantine saved score secure_media secure_media_embed selftext subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type total_awards_received ups upvote_ratio url user_reports""".split() ) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ = 1 , lowercase_ = "new" , lowercase_ = None ) -> dict: """simple docstring""" A__ = wanted_data or [] if invalid_search_terms := ", ".join(sorted(set(lowercase_ ) - valid_terms ) ): A__ = f"""Invalid search term: {invalid_search_terms}""" raise ValueError(lowercase_ ) A__ = requests.get( f"""https://reddit.com/r/{subreddit}/{age}.json?limit={limit}""" , headers={'''User-agent''': '''A random string'''} , ) if response.status_code == 429: raise requests.HTTPError A__ = response.json() if not wanted_data: return {id_: data["data"]["children"][id_] for id_ in range(lowercase_ )} A__ = {} for id_ in range(lowercase_ ): A__ = { item: data['''data''']['''children'''][id_]['''data'''][item] for item in wanted_data } return data_dict if __name__ == "__main__": # If you get Error 429, that means you are rate limited.Try after some time print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
87
1
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) _lowerCamelCase : Any = { """facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""", } class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''convnextv2''' def __init__( self : Optional[int] , UpperCAmelCase__ : Optional[int]=3 , UpperCAmelCase__ : List[Any]=4 , UpperCAmelCase__ : int=4 , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : str=None , UpperCAmelCase__ : Optional[Any]="gelu" , UpperCAmelCase__ : Optional[int]=0.02 , UpperCAmelCase__ : int=1e-12 , UpperCAmelCase__ : List[str]=0.0 , UpperCAmelCase__ : int=224 , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Any=None , **UpperCAmelCase__ : Optional[int] , ) ->Any: '''simple docstring''' super().__init__(**UpperCAmelCase__) A__ = num_channels A__ = patch_size A__ = num_stages A__ = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes A__ = [3, 3, 9, 3] if depths is None else depths A__ = hidden_act A__ = initializer_range A__ = layer_norm_eps A__ = drop_path_rate A__ = image_size A__ = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 , len(self.depths) + 1)] A__ , A__ = get_aligned_output_features_output_indices( out_features=UpperCAmelCase__ , out_indices=UpperCAmelCase__ , stage_names=self.stage_names)
87
import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = JukeboxTokenizer UpperCAmelCase__ = { '''artist''': '''Zac Brown Band''', '''genres''': '''Country''', '''lyrics''': '''I met a traveller from an antique land, Who said "Two vast and trunkless legs of stone Stand in the desert. . . . Near them, on the sand, Half sunk a shattered visage lies, whose frown, And wrinkled lip, and sneer of cold command, Tell that its sculptor well those passions read Which yet survive, stamped on these lifeless things, The hand that mocked them, and the heart that fed; And on the pedestal, these words appear: My name is Ozymandias, King of Kings; Look on my Works, ye Mighty, and despair! Nothing beside remains. Round the decay Of that colossal Wreck, boundless and bare The lone and level sands stretch far away ''', } @require_torch def SCREAMING_SNAKE_CASE ( self : Dict) ->int: '''simple docstring''' import torch A__ = JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''') A__ = tokenizer(**self.metas)['''input_ids'''] # fmt: off A__ = [ torch.tensor([[ 0, 0, 0, 7_169, 507, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67, 78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]]), torch.tensor([[0, 0, 0, 1_069, 11]]), torch.tensor([[0, 0, 0, 1_069, 11]]), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0])) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1])) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2])) @require_torch def SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[int]: '''simple docstring''' import torch A__ = JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''') A__ = tokenizer(**self.metas)['''input_ids'''] # fmt: off A__ = [ torch.tensor([[ 0, 0, 0, 1_069, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]]), torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]]), torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]]), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0])) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1])) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2]))
87
1
from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) _lowerCamelCase : int = _symbol_database.Default() _lowerCamelCase : Tuple = _descriptor_pool.Default().AddSerializedFile( B"""\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03""" ) _lowerCamelCase : List[Any] = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, """sentencepiece_model_pb2""", _globals) if _descriptor._USE_C_DESCRIPTORS is False: _lowerCamelCase : str = None _lowerCamelCase : List[Any] = B"""H\003""" # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" _lowerCamelCase : Optional[int] = 45 _lowerCamelCase : Optional[Any] = 1581 _lowerCamelCase : List[Any] = 1517 _lowerCamelCase : Dict = 1570 _lowerCamelCase : Tuple = 1584 _lowerCamelCase : Any = 1793 _lowerCamelCase : Dict = 1795 _lowerCamelCase : List[str] = 1916 _lowerCamelCase : str = 1864 _lowerCamelCase : List[str] = 1905 _lowerCamelCase : int = 1919 _lowerCamelCase : str = 2429 _lowerCamelCase : List[str] = 2208 _lowerCamelCase : Any = 2418 _lowerCamelCase : Optional[int] = 2323 _lowerCamelCase : List[str] = 2407 # @@protoc_insertion_point(module_scope)
87
from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCamelCase : str = logging.get_logger(__name__) _lowerCamelCase : List[str] = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""} class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''openai-gpt''' UpperCAmelCase__ = { '''max_position_embeddings''': '''n_positions''', '''hidden_size''': '''n_embd''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : Union[str, Any] , UpperCAmelCase__ : Dict=40_478 , UpperCAmelCase__ : str=512 , UpperCAmelCase__ : Union[str, Any]=768 , UpperCAmelCase__ : Optional[Any]=12 , UpperCAmelCase__ : Any=12 , UpperCAmelCase__ : Optional[Any]="gelu" , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : List[str]=1e-5 , UpperCAmelCase__ : int=0.02 , UpperCAmelCase__ : Any="cls_index" , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Optional[Any]=0.1 , **UpperCAmelCase__ : Dict , ) ->Any: '''simple docstring''' A__ = vocab_size A__ = n_positions A__ = n_embd A__ = n_layer A__ = n_head A__ = afn A__ = resid_pdrop A__ = embd_pdrop A__ = attn_pdrop A__ = layer_norm_epsilon A__ = initializer_range A__ = summary_type A__ = summary_use_proj A__ = summary_activation A__ = summary_first_dropout A__ = summary_proj_to_labels super().__init__(**UpperCAmelCase__)
87
1
from ..utils import DummyObject, requires_backends class UpperCamelCase_ ( metaclass=UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = ['''transformers''', '''torch''', '''note_seq'''] def __init__( self : Tuple , *UpperCAmelCase__ : str , **UpperCAmelCase__ : Union[str, Any]) ->Any: '''simple docstring''' requires_backends(self , ['''transformers''', '''torch''', '''note_seq''']) @classmethod def SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : Union[str, Any]) ->List[Any]: '''simple docstring''' requires_backends(cls , ['''transformers''', '''torch''', '''note_seq''']) @classmethod def SCREAMING_SNAKE_CASE ( cls : List[Any] , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : List[str]) ->Union[str, Any]: '''simple docstring''' requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''])
87
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> int: """simple docstring""" return int((input_a, input_a).count(1 ) != 0 ) def SCREAMING_SNAKE_CASE ( ) -> None: """simple docstring""" assert or_gate(0 , 0 ) == 0 assert or_gate(0 , 1 ) == 1 assert or_gate(1 , 0 ) == 1 assert or_gate(1 , 1 ) == 1 if __name__ == "__main__": print(or_gate(0, 1)) print(or_gate(1, 0)) print(or_gate(0, 0)) print(or_gate(1, 1))
87
1
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo _lowerCamelCase : Union[str, Any] = """\ @misc{wu2016googles, title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation}, author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes and Jeffrey Dean}, year={2016}, eprint={1609.08144}, archivePrefix={arXiv}, primaryClass={cs.CL} } """ _lowerCamelCase : Union[str, Any] = """\ The BLEU score has some undesirable properties when used for single sentences, as it was designed to be a corpus measure. We therefore use a slightly different score for our RL experiments which we call the 'GLEU score'. For the GLEU score, we record all sub-sequences of 1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then compute a recall, which is the ratio of the number of matching n-grams to the number of total n-grams in the target (ground truth) sequence, and a precision, which is the ratio of the number of matching n-grams to the number of total n-grams in the generated output sequence. Then GLEU score is simply the minimum of recall and precision. This GLEU score's range is always between 0 (no matches) and 1 (all match) and it is symmetrical when switching output and target. According to our experiments, GLEU score correlates quite well with the BLEU metric on a corpus level but does not have its drawbacks for our per sentence reward objective. """ _lowerCamelCase : str = """\ Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references. Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values. Args: predictions (list of str): list of translations to score. Each translation should be tokenized into a list of tokens. references (list of list of str): list of lists of references for each translation. Each reference should be tokenized into a list of tokens. min_len (int): The minimum order of n-gram this function should extract. Defaults to 1. max_len (int): The maximum order of n-gram this function should extract. Defaults to 4. Returns: 'google_bleu': google_bleu score Examples: Example 1: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.44 Example 2: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.61 Example 3: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2) >>> print(round(results[\"google_bleu\"], 2)) 0.53 Example 4: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6) >>> print(round(results[\"google_bleu\"], 2)) 0.4 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase_ ( datasets.Metric ): '''simple docstring''' def SCREAMING_SNAKE_CASE ( self : Tuple) ->MetricInfo: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''') , id='''sequence'''), '''references''': datasets.Sequence( datasets.Sequence(datasets.Value('''string''' , id='''token''') , id='''sequence''') , id='''references'''), }) , ) def SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase__ : List[List[List[str]]] , UpperCAmelCase__ : List[List[str]] , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : int = 4 , ) ->Dict[str, float]: '''simple docstring''' return { "google_bleu": gleu_score.corpus_gleu( list_of_references=UpperCAmelCase__ , hypotheses=UpperCAmelCase__ , min_len=UpperCAmelCase__ , max_len=UpperCAmelCase__) }
87
import importlib.metadata import warnings from copy import deepcopy from packaging import version from ..utils import logging from .import_utils import is_accelerate_available, is_bitsandbytes_available if is_bitsandbytes_available(): import bitsandbytes as bnb import torch import torch.nn as nn from ..pytorch_utils import ConvaD if is_accelerate_available(): from accelerate import init_empty_weights from accelerate.utils import find_tied_parameters _lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_=None , lowercase_=None ) -> Dict: """simple docstring""" if "." in tensor_name: A__ = tensor_name.split('''.''' ) for split in splits[:-1]: A__ = getattr(lowercase_ , lowercase_ ) if new_module is None: raise ValueError(f"""{module} has no attribute {split}.""" ) A__ = new_module A__ = splits[-1] if tensor_name not in module._parameters and tensor_name not in module._buffers: raise ValueError(f"""{module} does not have a parameter or a buffer named {tensor_name}.""" ) A__ = tensor_name in module._buffers A__ = getattr(lowercase_ , lowercase_ ) if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None: raise ValueError(f"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" ) A__ = False A__ = False if is_buffer or not is_bitsandbytes_available(): A__ = False A__ = False else: A__ = hasattr(bnb.nn , '''Params4bit''' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit ) A__ = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams ) if is_abit or is_abit: A__ = module._parameters[tensor_name] if param.device.type != "cuda": if value is None: A__ = old_value.to(lowercase_ ) elif isinstance(lowercase_ , torch.Tensor ): A__ = value.to('''cpu''' ) if value.dtype == torch.inta: A__ = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse( '''0.37.2''' ) if not is_abit_serializable: raise ValueError( '''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. ''' '''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' ) else: A__ = torch.tensor(lowercase_ , device='''cpu''' ) # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. if issubclass(module.source_cls , lowercase_ ) and fpaa_statistics is None: A__ = new_value.T A__ = old_value.__dict__ if is_abit: A__ = bnb.nn.IntaParams(lowercase_ , requires_grad=lowercase_ , **lowercase_ ).to(lowercase_ ) elif is_abit: A__ = bnb.nn.Paramsabit(lowercase_ , requires_grad=lowercase_ , **lowercase_ ).to(lowercase_ ) A__ = new_value if fpaa_statistics is not None: setattr(module.weight , '''SCB''' , fpaa_statistics.to(lowercase_ ) ) else: if value is None: A__ = old_value.to(lowercase_ ) elif isinstance(lowercase_ , torch.Tensor ): A__ = value.to(lowercase_ ) else: A__ = torch.tensor(lowercase_ , device=lowercase_ ) if is_buffer: A__ = new_value else: A__ = nn.Parameter(lowercase_ , requires_grad=old_value.requires_grad ) A__ = new_value def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=False ) -> Dict: """simple docstring""" for name, module in model.named_children(): if current_key_name is None: A__ = [] current_key_name.append(lowercase_ ) if (isinstance(lowercase_ , nn.Linear ) or isinstance(lowercase_ , lowercase_ )) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` if not any(key in '''.'''.join(lowercase_ ) for key in modules_to_not_convert ): with init_empty_weights(): if isinstance(lowercase_ , lowercase_ ): A__ , A__ = module.weight.shape else: A__ = module.in_features A__ = module.out_features if quantization_config.quantization_method() == "llm_int8": A__ = bnb.nn.LinearabitLt( lowercase_ , lowercase_ , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , ) A__ = True else: if ( quantization_config.llm_inta_skip_modules is not None and name in quantization_config.llm_inta_skip_modules ): pass else: A__ = bnb.nn.Linearabit( lowercase_ , lowercase_ , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , ) A__ = True # Store the module class in case we need to transpose the weight later A__ = type(lowercase_ ) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(lowercase_ ) if len(list(module.children() ) ) > 0: A__ , A__ = _replace_with_bnb_linear( lowercase_ , lowercase_ , lowercase_ , lowercase_ , has_been_replaced=lowercase_ , ) # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None ) -> Tuple: """simple docstring""" A__ = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert A__ , A__ = _replace_with_bnb_linear( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) if not has_been_replaced: logger.warning( '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.''' ''' Please double check your model architecture, or submit an issue on github if you think this is''' ''' a bug.''' ) return model def SCREAMING_SNAKE_CASE ( *lowercase_ , **lowercase_ ) -> Dict: """simple docstring""" warnings.warn( '''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' , lowercase_ , ) return replace_with_bnb_linear(*lowercase_ , **lowercase_ ) def SCREAMING_SNAKE_CASE ( *lowercase_ , **lowercase_ ) -> Optional[Any]: """simple docstring""" warnings.warn( '''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' , lowercase_ , ) return set_module_quantized_tensor_to_device(*lowercase_ , **lowercase_ ) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]: """simple docstring""" A__ = deepcopy(lowercase_ ) # this has 0 cost since it is done inside `init_empty_weights` context manager` tied_model.tie_weights() A__ = find_tied_parameters(lowercase_ ) # For compatibility with Accelerate < 0.18 if isinstance(lowercase_ , lowercase_ ): A__ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: A__ = sum(lowercase_ , [] ) A__ = len(lowercase_ ) > 0 # Check if it is a base model A__ = not hasattr(lowercase_ , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head A__ = list(model.named_children() ) A__ = [list_modules[-1][0]] # add last module together with tied weights A__ = set(lowercase_ ) - set(lowercase_ ) A__ = list(set(lowercase_ ) ) + list(lowercase_ ) # remove ".weight" from the keys A__ = ['''.weight''', '''.bias'''] A__ = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: A__ = name.replace(lowercase_ , '''''' ) filtered_module_names.append(lowercase_ ) return filtered_module_names
87
1
import json import os from typing import Optional import numpy as np from ...feature_extraction_utils import BatchFeature from ...processing_utils import ProcessorMixin from ...utils import logging from ...utils.hub import get_file_from_repo from ..auto import AutoTokenizer _lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''AutoTokenizer''' UpperCAmelCase__ = ['''tokenizer'''] UpperCAmelCase__ = { '''semantic_prompt''': 1, '''coarse_prompt''': 2, '''fine_prompt''': 2, } def __init__( self : List[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str]=None) ->List[str]: '''simple docstring''' super().__init__(UpperCAmelCase__) A__ = speaker_embeddings @classmethod def SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any="speaker_embeddings_path.json" , **UpperCAmelCase__ : Dict) ->int: '''simple docstring''' if speaker_embeddings_dict_path is not None: A__ = get_file_from_repo( UpperCAmelCase__ , UpperCAmelCase__ , subfolder=kwargs.pop('''subfolder''' , UpperCAmelCase__) , cache_dir=kwargs.pop('''cache_dir''' , UpperCAmelCase__) , force_download=kwargs.pop('''force_download''' , UpperCAmelCase__) , proxies=kwargs.pop('''proxies''' , UpperCAmelCase__) , resume_download=kwargs.pop('''resume_download''' , UpperCAmelCase__) , local_files_only=kwargs.pop('''local_files_only''' , UpperCAmelCase__) , use_auth_token=kwargs.pop('''use_auth_token''' , UpperCAmelCase__) , revision=kwargs.pop('''revision''' , UpperCAmelCase__) , ) if speaker_embeddings_path is None: logger.warning( f"""`{os.path.join(UpperCAmelCase__ , UpperCAmelCase__)}` does not exists , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.""") A__ = None else: with open(UpperCAmelCase__) as speaker_embeddings_json: A__ = json.load(UpperCAmelCase__) else: A__ = None A__ = AutoTokenizer.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__) return cls(tokenizer=UpperCAmelCase__ , speaker_embeddings=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple="speaker_embeddings_path.json" , UpperCAmelCase__ : Optional[Any]="speaker_embeddings" , UpperCAmelCase__ : bool = False , **UpperCAmelCase__ : List[str] , ) ->Union[str, Any]: '''simple docstring''' if self.speaker_embeddings is not None: os.makedirs(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ , '''v2''') , exist_ok=UpperCAmelCase__) A__ = {} A__ = save_directory for prompt_key in self.speaker_embeddings: if prompt_key != "repo_or_path": A__ = self._load_voice_preset(UpperCAmelCase__) A__ = {} for key in self.speaker_embeddings[prompt_key]: np.save( os.path.join( embeddings_dict['''repo_or_path'''] , UpperCAmelCase__ , f"""{prompt_key}_{key}""") , voice_preset[key] , allow_pickle=UpperCAmelCase__ , ) A__ = os.path.join(UpperCAmelCase__ , f"""{prompt_key}_{key}.npy""") A__ = tmp_dict with open(os.path.join(UpperCAmelCase__ , UpperCAmelCase__) , '''w''') as fp: json.dump(UpperCAmelCase__ , UpperCAmelCase__) super().save_pretrained(UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : str = None , **UpperCAmelCase__ : Tuple) ->Union[str, Any]: '''simple docstring''' A__ = self.speaker_embeddings[voice_preset] A__ = {} for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset_paths: raise ValueError( f"""Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].""") A__ = get_file_from_repo( self.speaker_embeddings.get('''repo_or_path''' , '''/''') , voice_preset_paths[key] , subfolder=kwargs.pop('''subfolder''' , UpperCAmelCase__) , cache_dir=kwargs.pop('''cache_dir''' , UpperCAmelCase__) , force_download=kwargs.pop('''force_download''' , UpperCAmelCase__) , proxies=kwargs.pop('''proxies''' , UpperCAmelCase__) , resume_download=kwargs.pop('''resume_download''' , UpperCAmelCase__) , local_files_only=kwargs.pop('''local_files_only''' , UpperCAmelCase__) , use_auth_token=kwargs.pop('''use_auth_token''' , UpperCAmelCase__) , revision=kwargs.pop('''revision''' , UpperCAmelCase__) , ) if path is None: raise ValueError( f"""`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/") , voice_preset_paths[key])}` does not exists , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset} embeddings.""") A__ = np.load(UpperCAmelCase__) return voice_preset_dict def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : Optional[dict] = None) ->Optional[int]: '''simple docstring''' for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset: raise ValueError(f"""Voice preset unrecognized, missing {key} as a key.""") if not isinstance(voice_preset[key] , np.ndarray): raise ValueError(f"""{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.""") if len(voice_preset[key].shape) != self.preset_shape[key]: raise ValueError(f"""{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.""") def __call__( self : Optional[Any] , UpperCAmelCase__ : int=None , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Optional[Any]="pt" , UpperCAmelCase__ : str=256 , UpperCAmelCase__ : Optional[int]=False , UpperCAmelCase__ : int=True , UpperCAmelCase__ : List[str]=False , **UpperCAmelCase__ : int , ) ->int: '''simple docstring''' if voice_preset is not None and not isinstance(UpperCAmelCase__ , UpperCAmelCase__): if ( isinstance(UpperCAmelCase__ , UpperCAmelCase__) and self.speaker_embeddings is not None and voice_preset in self.speaker_embeddings ): A__ = self._load_voice_preset(UpperCAmelCase__) else: if isinstance(UpperCAmelCase__ , UpperCAmelCase__) and not voice_preset.endswith('''.npz'''): A__ = voice_preset + '''.npz''' A__ = np.load(UpperCAmelCase__) if voice_preset is not None: self._validate_voice_preset_dict(UpperCAmelCase__ , **UpperCAmelCase__) A__ = BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__) A__ = self.tokenizer( UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , padding='''max_length''' , max_length=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , **UpperCAmelCase__ , ) if voice_preset is not None: A__ = voice_preset return encoded_text
87
from math import sqrt import numpy as np from sympy import symbols # Coefficient # Speed of light (m/s) _lowerCamelCase : str = 299792458 # Symbols _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = symbols("""ct x y z""") def SCREAMING_SNAKE_CASE ( lowercase_ ) -> float: """simple docstring""" if velocity > c: raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''' ) elif velocity < 1: # Usually the speed should be much higher than 1 (c order of magnitude) raise ValueError('''Speed must be greater than or equal to 1!''' ) return velocity / c def SCREAMING_SNAKE_CASE ( lowercase_ ) -> float: """simple docstring""" return 1 / sqrt(1 - beta(lowercase_ ) ** 2 ) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> np.ndarray: """simple docstring""" return np.array( [ [gamma(lowercase_ ), -gamma(lowercase_ ) * beta(lowercase_ ), 0, 0], [-gamma(lowercase_ ) * beta(lowercase_ ), gamma(lowercase_ ), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], ] ) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ = None ) -> np.ndarray: """simple docstring""" if event is None: A__ = np.array([ct, x, y, z] ) # Symbolic four vector else: event[0] *= c # x0 is ct (speed of light * time) return transformation_matrix(lowercase_ ) @ event if __name__ == "__main__": import doctest doctest.testmod() # Example of symbolic vector: _lowerCamelCase : Tuple = transform(29979245) print("""Example of four vector: """) print(F'''ct\' = {four_vector[0]}''') print(F'''x\' = {four_vector[1]}''') print(F'''y\' = {four_vector[2]}''') print(F'''z\' = {four_vector[3]}''') # Substitute symbols with numerical values _lowerCamelCase : int = {ct: c, x: 1, y: 1, z: 1} _lowerCamelCase : Any = [four_vector[i].subs(sub_dict) for i in range(4)] print(F'''\n{numerical_vector}''')
87
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _lowerCamelCase : Tuple = { """configuration_whisper""": ["""WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WhisperConfig""", """WhisperOnnxConfig"""], """feature_extraction_whisper""": ["""WhisperFeatureExtractor"""], """processing_whisper""": ["""WhisperProcessor"""], """tokenization_whisper""": ["""WhisperTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Dict = ["""WhisperTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : int = [ """WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""", """WhisperForConditionalGeneration""", """WhisperModel""", """WhisperPreTrainedModel""", """WhisperForAudioClassification""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Any = [ """TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFWhisperForConditionalGeneration""", """TFWhisperModel""", """TFWhisperPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Union[str, Any] = [ """FlaxWhisperForConditionalGeneration""", """FlaxWhisperModel""", """FlaxWhisperPreTrainedModel""", """FlaxWhisperForAudioClassification""", ] if TYPE_CHECKING: from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig from .feature_extraction_whisper import WhisperFeatureExtractor from .processing_whisper import WhisperProcessor from .tokenization_whisper import WhisperTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_whisper_fast import WhisperTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_whisper import ( WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, WhisperForAudioClassification, WhisperForConditionalGeneration, WhisperModel, WhisperPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_whisper import ( TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, TFWhisperForConditionalGeneration, TFWhisperModel, TFWhisperPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_whisper import ( FlaxWhisperForAudioClassification, FlaxWhisperForConditionalGeneration, FlaxWhisperModel, FlaxWhisperPreTrainedModel, ) else: import sys _lowerCamelCase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
87
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> list: """simple docstring""" if len(lowercase_ ) <= 1: return [tuple(lowercase_ )] A__ = [] def generate(lowercase_ , lowercase_ ): if k == 1: res.append(tuple(arr[:] ) ) return generate(k - 1 , lowercase_ ) for i in range(k - 1 ): if k % 2 == 0: # k is even A__ , A__ = arr[k - 1], arr[i] else: # k is odd A__ , A__ = arr[k - 1], arr[0] generate(k - 1 , lowercase_ ) generate(len(lowercase_ ) , lowercase_ ) return res if __name__ == "__main__": _lowerCamelCase : int = input("""Enter numbers separated by a comma:\n""").strip() _lowerCamelCase : str = [int(item) for item in user_input.split(""",""")] print(heaps(arr))
87
1
from math import atan, cos, radians, sin, tan from .haversine_distance import haversine_distance _lowerCamelCase : Dict = 6_378_137.0 _lowerCamelCase : Union[str, Any] = 6_356_752.314_245 _lowerCamelCase : List[Any] = 6378137 def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> float: """simple docstring""" A__ = (AXIS_A - AXIS_B) / AXIS_A # Parametric latitudes # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude A__ = atan((1 - flattening) * tan(radians(lowercase_ ) ) ) A__ = atan((1 - flattening) * tan(radians(lowercase_ ) ) ) # Compute central angle between two points # using haversine theta. sigma = haversine_distance / equatorial radius A__ = haversine_distance(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) / EQUATORIAL_RADIUS # Intermediate P and Q values A__ = (b_lata + b_lata) / 2 A__ = (b_lata - b_lata) / 2 # Intermediate X value # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2) A__ = (sin(lowercase_ ) ** 2) * (cos(lowercase_ ) ** 2) A__ = cos(sigma / 2 ) ** 2 A__ = (sigma - sin(lowercase_ )) * (x_numerator / x_demonimator) # Intermediate Y value # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2) A__ = (cos(lowercase_ ) ** 2) * (sin(lowercase_ ) ** 2) A__ = sin(sigma / 2 ) ** 2 A__ = (sigma + sin(lowercase_ )) * (y_numerator / y_denominator) return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value))) if __name__ == "__main__": import doctest doctest.testmod()
87
import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def SCREAMING_SNAKE_CASE ( lowercase_ ) -> int: """simple docstring""" return 1.0 / (1.0 + np.exp(-_outputs )) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Union[str, Any]: """simple docstring""" A__ = np.max(_outputs , axis=-1 , keepdims=lowercase_ ) A__ = np.exp(_outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=lowercase_ ) class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''sigmoid''' UpperCAmelCase__ = '''softmax''' UpperCAmelCase__ = '''none''' @add_end_docstrings( UpperCAmelCase__ , R''' return_all_scores (`bool`, *optional*, defaults to `False`): Whether to return all prediction scores or just the one of the predicted class. function_to_apply (`str`, *optional*, defaults to `"default"`): The function to apply to the model outputs in order to retrieve the scores. Accepts four different values: - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model has several labels, will apply the softmax function on the output. - `"sigmoid"`: Applies the sigmoid function on the output. - `"softmax"`: Applies the softmax function on the output. - `"none"`: Does not apply any function on the output. ''' , ) class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = False UpperCAmelCase__ = ClassificationFunction.NONE def __init__( self : Any , **UpperCAmelCase__ : Optional[Any]) ->str: '''simple docstring''' super().__init__(**UpperCAmelCase__) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == '''tf''' else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING) def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : int="" , **UpperCAmelCase__ : Any) ->int: '''simple docstring''' A__ = tokenizer_kwargs A__ = {} if hasattr(self.model.config , '''return_all_scores''') and return_all_scores is None: A__ = self.model.config.return_all_scores if isinstance(UpperCAmelCase__ , UpperCAmelCase__) or top_k is None: A__ = top_k A__ = False elif return_all_scores is not None: warnings.warn( '''`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of''' ''' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.''' , UpperCAmelCase__ , ) if return_all_scores: A__ = None else: A__ = 1 if isinstance(UpperCAmelCase__ , UpperCAmelCase__): A__ = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: A__ = function_to_apply return preprocess_params, {}, postprocess_params def __call__( self : str , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Optional[int]) ->Union[str, Any]: '''simple docstring''' A__ = super().__call__(*UpperCAmelCase__ , **UpperCAmelCase__) # TODO try and retrieve it in a nicer way from _sanitize_parameters. A__ = '''top_k''' not in kwargs if isinstance(args[0] , UpperCAmelCase__) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : Any , **UpperCAmelCase__ : str) ->Dict[str, GenericTensor]: '''simple docstring''' A__ = self.framework if isinstance(UpperCAmelCase__ , UpperCAmelCase__): return self.tokenizer(**UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__) elif isinstance(UpperCAmelCase__ , UpperCAmelCase__) and len(UpperCAmelCase__) == 1 and isinstance(inputs[0] , UpperCAmelCase__) and len(inputs[0]) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__) elif isinstance(UpperCAmelCase__ , UpperCAmelCase__): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( '''The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a''' ''' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.''') return self.tokenizer(UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : Tuple) ->Tuple: '''simple docstring''' return self.model(**UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : List[Any]=1 , UpperCAmelCase__ : str=True) ->Dict: '''simple docstring''' if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: A__ = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: A__ = ClassificationFunction.SOFTMAX elif hasattr(self.model.config , '''function_to_apply''') and function_to_apply is None: A__ = self.model.config.function_to_apply else: A__ = ClassificationFunction.NONE A__ = model_outputs['''logits'''][0] A__ = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: A__ = sigmoid(UpperCAmelCase__) elif function_to_apply == ClassificationFunction.SOFTMAX: A__ = softmax(UpperCAmelCase__) elif function_to_apply == ClassificationFunction.NONE: A__ = outputs else: raise ValueError(f"""Unrecognized `function_to_apply` argument: {function_to_apply}""") if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} A__ = [ {'''label''': self.model.config.idalabel[i], '''score''': score.item()} for i, score in enumerate(UpperCAmelCase__) ] if not _legacy: dict_scores.sort(key=lambda UpperCAmelCase__: x["score"] , reverse=UpperCAmelCase__) if top_k is not None: A__ = dict_scores[:top_k] return dict_scores
87
1
from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class UpperCamelCase_ : '''simple docstring''' def __init__( self : List[str] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str]=13 , UpperCAmelCase__ : List[Any]=7 , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : str=99 , UpperCAmelCase__ : Tuple=32 , UpperCAmelCase__ : Optional[Any]=2 , UpperCAmelCase__ : List[Any]=4 , UpperCAmelCase__ : Union[str, Any]=37 , UpperCAmelCase__ : Any="gelu" , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : int=512 , UpperCAmelCase__ : Tuple=16 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : int=0.02 , UpperCAmelCase__ : Optional[int]=3 , UpperCAmelCase__ : int=4 , UpperCAmelCase__ : int=None , ) ->Optional[int]: '''simple docstring''' A__ = parent A__ = 13 A__ = 7 A__ = True A__ = True A__ = True A__ = True A__ = 99 A__ = 384 A__ = 2 A__ = 4 A__ = 37 A__ = '''gelu''' A__ = 0.1 A__ = 0.1 A__ = 512 A__ = 16 A__ = 2 A__ = 0.02 A__ = 3 A__ = 4 A__ = 128 A__ = 2 A__ = 9 A__ = 1 A__ = None def SCREAMING_SNAKE_CASE ( self : List[Any]) ->int: '''simple docstring''' A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) A__ = None if self.use_input_mask: A__ = random_attention_mask([self.batch_size, self.seq_length]) A__ = None if self.use_token_type_ids: A__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) A__ = None A__ = None A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size) A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) A__ = ids_tensor([self.batch_size] , self.num_choices) A__ = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase__ , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any]) ->List[Any]: '''simple docstring''' A__ = TFConvBertModel(config=UpperCAmelCase__) A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A__ = [input_ids, input_mask] A__ = model(UpperCAmelCase__) A__ = model(UpperCAmelCase__) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple) ->Tuple: '''simple docstring''' A__ = TFConvBertForMaskedLM(config=UpperCAmelCase__) A__ = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } A__ = model(UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : str) ->Tuple: '''simple docstring''' A__ = self.num_labels A__ = TFConvBertForSequenceClassification(config=UpperCAmelCase__) A__ = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } A__ = model(UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[Any]) ->Union[str, Any]: '''simple docstring''' A__ = self.num_choices A__ = TFConvBertForMultipleChoice(config=UpperCAmelCase__) A__ = tf.tile(tf.expand_dims(UpperCAmelCase__ , 1) , (1, self.num_choices, 1)) A__ = tf.tile(tf.expand_dims(UpperCAmelCase__ , 1) , (1, self.num_choices, 1)) A__ = tf.tile(tf.expand_dims(UpperCAmelCase__ , 1) , (1, self.num_choices, 1)) A__ = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, '''token_type_ids''': multiple_choice_token_type_ids, } A__ = model(UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str) ->Tuple: '''simple docstring''' A__ = self.num_labels A__ = TFConvBertForTokenClassification(config=UpperCAmelCase__) A__ = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } A__ = model(UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str]) ->Any: '''simple docstring''' A__ = TFConvBertForQuestionAnswering(config=UpperCAmelCase__) A__ = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } A__ = model(UpperCAmelCase__) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[int]: '''simple docstring''' A__ = self.prepare_config_and_inputs() ( ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ) = config_and_inputs A__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) UpperCAmelCase__ = ( { '''feature-extraction''': TFConvBertModel, '''fill-mask''': TFConvBertForMaskedLM, '''question-answering''': TFConvBertForQuestionAnswering, '''text-classification''': TFConvBertForSequenceClassification, '''token-classification''': TFConvBertForTokenClassification, '''zero-shot''': TFConvBertForSequenceClassification, } if is_tf_available() else {} ) UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict: '''simple docstring''' A__ = TFConvBertModelTester(self) A__ = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37) def SCREAMING_SNAKE_CASE ( self : int) ->Union[str, Any]: '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : Dict) ->Tuple: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : int) ->int: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Any) ->Dict: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[str]) ->int: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : int) ->List[str]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase__) @slow def SCREAMING_SNAKE_CASE ( self : Dict) ->Union[str, Any]: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = True A__ = True if hasattr(UpperCAmelCase__ , '''use_cache'''): A__ = True A__ = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length) A__ = getattr(self.model_tester , '''key_length''' , UpperCAmelCase__) for model_class in self.all_model_classes: A__ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__) A__ = model_class(UpperCAmelCase__) A__ = len(model(UpperCAmelCase__)) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCAmelCase__ , saved_model=UpperCAmelCase__) A__ = os.path.join(UpperCAmelCase__ , '''saved_model''' , '''1''') A__ = tf.keras.models.load_model(UpperCAmelCase__) A__ = model(UpperCAmelCase__) if self.is_encoder_decoder: A__ = outputs['''encoder_hidden_states'''] A__ = outputs['''encoder_attentions'''] else: A__ = outputs['''hidden_states'''] A__ = outputs['''attentions'''] self.assertEqual(len(UpperCAmelCase__) , UpperCAmelCase__) A__ = getattr( self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1) self.assertEqual(len(UpperCAmelCase__) , UpperCAmelCase__) self.assertListEqual( list(output_hidden_states[0].shape[-2:]) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(UpperCAmelCase__) , self.model_tester.num_hidden_layers) self.assertListEqual( list(output_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def SCREAMING_SNAKE_CASE ( self : Dict) ->Tuple: '''simple docstring''' A__ = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''') self.assertIsNotNone(UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Optional[Any]: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = True A__ = getattr(self.model_tester , '''decoder_seq_length''' , self.model_tester.seq_length) A__ = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length) A__ = getattr(self.model_tester , '''key_length''' , UpperCAmelCase__) A__ = getattr(self.model_tester , '''key_length''' , UpperCAmelCase__) def check_decoder_attentions_output(UpperCAmelCase__ : List[Any]): A__ = len(UpperCAmelCase__) self.assertEqual(out_len % 2 , 0) A__ = outputs.decoder_attentions self.assertEqual(len(UpperCAmelCase__) , self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(UpperCAmelCase__ : str): A__ = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(UpperCAmelCase__) , self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: A__ = True A__ = False A__ = model_class(UpperCAmelCase__) A__ = model(self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__)) A__ = len(UpperCAmelCase__) self.assertEqual(config.output_hidden_states , UpperCAmelCase__) check_encoder_attentions_output(UpperCAmelCase__) if self.is_encoder_decoder: A__ = model_class(UpperCAmelCase__) A__ = model(self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__)) self.assertEqual(config.output_hidden_states , UpperCAmelCase__) check_decoder_attentions_output(UpperCAmelCase__) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] A__ = True A__ = model_class(UpperCAmelCase__) A__ = model(self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__)) self.assertEqual(config.output_hidden_states , UpperCAmelCase__) check_encoder_attentions_output(UpperCAmelCase__) # Check attention is always last and order is fine A__ = True A__ = True A__ = model_class(UpperCAmelCase__) A__ = model(self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__)) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCAmelCase__)) self.assertEqual(model.config.output_hidden_states , UpperCAmelCase__) check_encoder_attentions_output(UpperCAmelCase__) @require_tf class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Any: '''simple docstring''' A__ = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''') A__ = tf.constant([[0, 1, 2, 3, 4, 5]]) A__ = model(UpperCAmelCase__)[0] A__ = [1, 6, 768] self.assertEqual(output.shape , UpperCAmelCase__) A__ = tf.constant( [ [ [-0.03475493, -0.4686034, -0.30638832], [0.22637248, -0.26988646, -0.7423424], [0.10324868, -0.45013508, -0.58280784], ] ]) tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase__ , atol=1e-4)
87
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _lowerCamelCase : Any = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : int = ["""XLNetTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : int = ["""XLNetTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Union[str, Any] = [ """XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """XLNetForMultipleChoice""", """XLNetForQuestionAnswering""", """XLNetForQuestionAnsweringSimple""", """XLNetForSequenceClassification""", """XLNetForTokenClassification""", """XLNetLMHeadModel""", """XLNetModel""", """XLNetPreTrainedModel""", """load_tf_weights_in_xlnet""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Dict = [ """TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFXLNetForMultipleChoice""", """TFXLNetForQuestionAnsweringSimple""", """TFXLNetForSequenceClassification""", """TFXLNetForTokenClassification""", """TFXLNetLMHeadModel""", """TFXLNetMainLayer""", """TFXLNetModel""", """TFXLNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet import XLNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet_fast import XLNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlnet import ( XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, XLNetForMultipleChoice, XLNetForQuestionAnswering, XLNetForQuestionAnsweringSimple, XLNetForSequenceClassification, XLNetForTokenClassification, XLNetLMHeadModel, XLNetModel, XLNetPreTrainedModel, load_tf_weights_in_xlnet, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlnet import ( TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLNetForMultipleChoice, TFXLNetForQuestionAnsweringSimple, TFXLNetForSequenceClassification, TFXLNetForTokenClassification, TFXLNetLMHeadModel, TFXLNetMainLayer, TFXLNetModel, TFXLNetPreTrainedModel, ) else: import sys _lowerCamelCase : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
87
1
from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCamelCase : str = logging.get_logger(__name__) _lowerCamelCase : List[str] = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""} class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''openai-gpt''' UpperCAmelCase__ = { '''max_position_embeddings''': '''n_positions''', '''hidden_size''': '''n_embd''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : Union[str, Any] , UpperCAmelCase__ : Dict=40_478 , UpperCAmelCase__ : str=512 , UpperCAmelCase__ : Union[str, Any]=768 , UpperCAmelCase__ : Optional[Any]=12 , UpperCAmelCase__ : Any=12 , UpperCAmelCase__ : Optional[Any]="gelu" , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : List[str]=1e-5 , UpperCAmelCase__ : int=0.02 , UpperCAmelCase__ : Any="cls_index" , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Optional[Any]=0.1 , **UpperCAmelCase__ : Dict , ) ->Any: '''simple docstring''' A__ = vocab_size A__ = n_positions A__ = n_embd A__ = n_layer A__ = n_head A__ = afn A__ = resid_pdrop A__ = embd_pdrop A__ = attn_pdrop A__ = layer_norm_epsilon A__ = initializer_range A__ = summary_type A__ = summary_use_proj A__ = summary_activation A__ = summary_first_dropout A__ = summary_proj_to_labels super().__init__(**UpperCAmelCase__)
87
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCamelCase : Optional[Any] = logging.get_logger(__name__) _lowerCamelCase : Union[str, Any] = { """google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""", """google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""", # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 } class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''mobilenet_v1''' def __init__( self : Optional[int] , UpperCAmelCase__ : Optional[int]=3 , UpperCAmelCase__ : Optional[Any]=224 , UpperCAmelCase__ : Optional[int]=1.0 , UpperCAmelCase__ : Optional[int]=8 , UpperCAmelCase__ : Tuple="relu6" , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Dict=0.999 , UpperCAmelCase__ : str=0.02 , UpperCAmelCase__ : Optional[int]=0.001 , **UpperCAmelCase__ : Dict , ) ->List[str]: '''simple docstring''' super().__init__(**UpperCAmelCase__) if depth_multiplier <= 0: raise ValueError('''depth_multiplier must be greater than zero.''') A__ = num_channels A__ = image_size A__ = depth_multiplier A__ = min_depth A__ = hidden_act A__ = tf_padding A__ = classifier_dropout_prob A__ = initializer_range A__ = layer_norm_eps class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = version.parse('''1.11''' ) @property def SCREAMING_SNAKE_CASE ( self : Any) ->Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict([('''pixel_values''', {0: '''batch'''})]) @property def SCREAMING_SNAKE_CASE ( self : List[str]) ->Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task == "image-classification": return OrderedDict([('''logits''', {0: '''batch'''})]) else: return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})]) @property def SCREAMING_SNAKE_CASE ( self : int) ->float: '''simple docstring''' return 1e-4
87
1
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path: # hack it in for now: import sys from pathlib import Path _lowerCamelCase : Tuple = Path(__file__).resolve().parents[3] / """src""" sys.path.insert(1, str(git_repo_path)) import dataclasses # noqa import io # noqa import itertools # noqa import json # noqa import os # noqa import unittest # noqa from copy import deepcopy # noqa from parameterized import parameterized # noqa from transformers import TrainingArguments, is_torch_available # noqa from transformers.deepspeed import is_deepspeed_available # noqa from transformers.file_utils import WEIGHTS_NAME # noqa from transformers.testing_utils import ( # noqa CaptureLogger, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, mockenv_context, require_deepspeed, require_torch_gpu, require_torch_multi_gpu, slow, ) from transformers.trainer_utils import set_seed # noqa set_seed(42) _lowerCamelCase : Optional[Any] = {"""base""": """patrickvonplaten/wav2vec2_tiny_random""", """robust""": """patrickvonplaten/wav2vec2_tiny_random_robust"""} _lowerCamelCase : Optional[Any] = """zero2""" _lowerCamelCase : List[str] = """zero3""" _lowerCamelCase : Union[str, Any] = [ZEROa, ZEROa] def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Optional[int]: """simple docstring""" A__ = parameterized.to_safe_name('''_'''.join(str(lowercase_ ) for x in param.args ) ) return f"""{func.__name__}_{param_based_name}""" # Cartesian-product of zero stages with models to test _lowerCamelCase : List[str] = list(itertools.product(stages, models.keys())) @slow @require_deepspeed @require_torch_gpu class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' @parameterized.expand(UpperCAmelCase__ , name_func=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str) ->Optional[Any]: '''simple docstring''' self.run_and_check( stage=UpperCAmelCase__ , model=UpperCAmelCase__ , distributed=UpperCAmelCase__ , fpaa=UpperCAmelCase__ , ) @require_torch_multi_gpu @parameterized.expand(UpperCAmelCase__ , name_func=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[Any]) ->Optional[Any]: '''simple docstring''' self.run_and_check( stage=UpperCAmelCase__ , model=UpperCAmelCase__ , distributed=UpperCAmelCase__ , fpaa=UpperCAmelCase__ , ) @parameterized.expand(UpperCAmelCase__ , name_func=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Tuple) ->Any: '''simple docstring''' self.run_and_check( stage=UpperCAmelCase__ , model=UpperCAmelCase__ , distributed=UpperCAmelCase__ , fpaa=UpperCAmelCase__ , ) @require_torch_multi_gpu @parameterized.expand(UpperCAmelCase__ , name_func=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any) ->Union[str, Any]: '''simple docstring''' self.run_and_check( stage=UpperCAmelCase__ , model=UpperCAmelCase__ , distributed=UpperCAmelCase__ , fpaa=UpperCAmelCase__ , ) def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : int) ->Optional[int]: '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : int = 10 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : bool = True , ) ->List[Any]: '''simple docstring''' A__ = models[model] A__ = self.run_trainer( stage=UpperCAmelCase__ , model_name=UpperCAmelCase__ , eval_steps=UpperCAmelCase__ , num_train_epochs=1 , distributed=UpperCAmelCase__ , fpaa=UpperCAmelCase__ , ) self.do_checks(UpperCAmelCase__) return output_dir def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : int = 10 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : bool = True , ) ->Any: '''simple docstring''' A__ = self.get_auto_remove_tmp_dir('''./xxx''' , after=UpperCAmelCase__) A__ = f""" --model_name_or_path {model_name} --dataset_name hf-internal-testing/librispeech_asr_dummy --dataset_config_name clean --train_split_name validation --validation_split_name validation --output_dir {output_dir} --num_train_epochs {str(UpperCAmelCase__)} --per_device_train_batch_size 2 --per_device_eval_batch_size 2 --evaluation_strategy steps --learning_rate 5e-4 --warmup_steps 8 --orthography timit --preprocessing_num_workers 1 --group_by_length --freeze_feature_extractor --report_to none --save_steps 0 --eval_steps {eval_steps} --report_to none """.split() if fpaa: args.extend(['''--fp16''']) # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true, # hence the separate config files A__ = f"""--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json""".split() A__ = [f"""{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"""] A__ = self.get_launcher(UpperCAmelCase__) A__ = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(UpperCAmelCase__ , env=self.get_env()) return output_dir def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : List[str]=False) ->Optional[Any]: '''simple docstring''' A__ = min(2 , get_gpu_count()) if distributed else 1 return f"""deepspeed --num_nodes 1 --num_gpus {num_gpus}""".split()
87
import unittest from pathlib import Path from shutil import copyfile from transformers import SPIECE_UNDERLINE, is_sentencepiece_available from transformers.models.speech_to_text import SpeechaTextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin _lowerCamelCase : Dict = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_sentencepiece_available(): import sentencepiece as sp _lowerCamelCase : str = 5 _lowerCamelCase : int = 10 @require_sentencepiece @require_tokenizers class UpperCamelCase_ ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = SpeechaTextTokenizer UpperCAmelCase__ = False UpperCAmelCase__ = True def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[str]: '''simple docstring''' super().setUp() A__ = sp.SentencePieceProcessor() spm_model.Load(UpperCAmelCase__) A__ = ['''<s>''', '''<pad>''', '''</s>''', '''<unk>'''] vocab += [spm_model.IdToPiece(id_) for id_ in range(len(UpperCAmelCase__))] A__ = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__)))) A__ = Path(self.tmpdirname) save_json(UpperCAmelCase__ , save_dir / VOCAB_FILES_NAMES['''vocab_file''']) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(UpperCAmelCase__ , save_dir / VOCAB_FILES_NAMES['''spm_file''']) A__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname) tokenizer.save_pretrained(self.tmpdirname) def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[int]: '''simple docstring''' A__ = '''<pad>''' A__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__) , UpperCAmelCase__) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__) , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any: '''simple docstring''' A__ = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , '''<s>''') self.assertEqual(vocab_keys[1] , '''<pad>''') self.assertEqual(vocab_keys[-1] , '''j''') self.assertEqual(len(UpperCAmelCase__) , 1_001) def SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[str]: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1_001) def SCREAMING_SNAKE_CASE ( self : int) ->List[str]: '''simple docstring''' A__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname) A__ = tokenizer.tokenize('''This is a test''') self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase__) , [289, 50, 14, 174, 386] , ) A__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''') self.assertListEqual( UpperCAmelCase__ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , ) A__ = tokenizer.convert_tokens_to_ids(UpperCAmelCase__) self.assertListEqual(UpperCAmelCase__ , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8]) A__ = tokenizer.convert_ids_to_tokens(UpperCAmelCase__) self.assertListEqual( UpperCAmelCase__ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , ) @slow def SCREAMING_SNAKE_CASE ( self : int) ->List[Any]: '''simple docstring''' A__ = {'''input_ids''': [[3_791, 797, 31, 11, 64, 797, 31, 2_429, 433, 12, 1_176, 12, 20, 786, 915, 142, 2_413, 240, 37, 3_238, 797, 31, 11, 35, 93, 915, 142, 2_413, 240, 37, 5_540, 567, 1_276, 93, 37, 610, 40, 62, 455, 657, 1_042, 123, 780, 177, 37, 309, 241, 1_298, 514, 20, 292, 2_737, 114, 2_469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3_388, 511, 459, 4, 3_555, 40, 321, 302, 705, 4, 3_388, 511, 583, 326, 5, 5, 5, 62, 3_310, 560, 177, 2_680, 217, 1_508, 32, 31, 853, 418, 64, 583, 511, 1_605, 62, 35, 93, 560, 177, 2_680, 217, 1_508, 1_521, 64, 583, 511, 519, 62, 20, 1_515, 764, 20, 149, 261, 5_625, 7_972, 20, 5_540, 567, 1_276, 93, 3_925, 1_675, 11, 15, 802, 7_972, 576, 217, 1_508, 11, 35, 93, 1_253, 2_441, 15, 289, 652, 31, 416, 321, 3_842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2_681, 1_153, 3_434, 20, 5_540, 37, 567, 126, 1_253, 2_441, 3_376, 449, 210, 431, 1_563, 177, 767, 5_540, 11, 1_203, 472, 11, 2_953, 685, 285, 364, 706, 1_153, 20, 6_799, 20, 2_869, 20, 4_464, 126, 40, 2_429, 20, 1_040, 866, 2_664, 418, 20, 318, 20, 1_726, 186, 20, 265, 522, 35, 93, 2_191, 4_634, 20, 1_040, 12, 6_799, 15, 228, 2_356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_575, 2_666, 684, 1_582, 1_176, 12, 627, 149, 619, 20, 4_902, 563, 11, 20, 149, 261, 3_420, 2_356, 174, 142, 4_714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase__ , model_name='''facebook/s2t-small-mustc-en-de-st''' , revision='''a14f04cf0776c02f62a8cb800cf7909e15ea23ad''' , ) @require_sentencepiece class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = '''valhalla/s2t_mustc_multilinguial_medium''' UpperCAmelCase__ = '''C\'est trop cool''' UpperCAmelCase__ = '''Esto es genial''' @classmethod def SCREAMING_SNAKE_CASE ( cls : Dict) ->Dict: '''simple docstring''' A__ = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name) return cls def SCREAMING_SNAKE_CASE ( self : str) ->Dict: '''simple docstring''' self.assertEqual(self.tokenizer.lang_code_to_id['''pt'''] , 4) self.assertEqual(self.tokenizer.lang_code_to_id['''ru'''] , 6) self.assertEqual(self.tokenizer.lang_code_to_id['''it'''] , 9) self.assertEqual(self.tokenizer.lang_code_to_id['''de'''] , 11) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Dict: '''simple docstring''' self.assertEqual(self.tokenizer.vocab_size , 10_000) def SCREAMING_SNAKE_CASE ( self : Dict) ->Union[str, Any]: '''simple docstring''' self.assertIn(UpperCAmelCase__ , self.tokenizer.all_special_ids) A__ = [ES_CODE, 4, 1_601, 47, 7_647, 2] A__ = self.tokenizer.decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__) A__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCAmelCase__) self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__) self.assertNotIn(self.tokenizer.eos_token , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : int) ->str: '''simple docstring''' A__ = '''fr''' A__ = self.tokenizer(self.french_text).input_ids self.assertEqual(encoded[0] , UpperCAmelCase__) self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->int: '''simple docstring''' A__ = '''fr''' self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE]) A__ = '''es''' self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE])
87
1
import argparse import pytorch_lightning as pl import torch from torch import nn from transformers import LongformerForQuestionAnswering, LongformerModel class UpperCamelCase_ ( pl.LightningModule ): '''simple docstring''' def __init__( self : Optional[Any] , UpperCAmelCase__ : Union[str, Any]) ->Union[str, Any]: '''simple docstring''' super().__init__() A__ = model A__ = 2 A__ = nn.Linear(self.model.config.hidden_size , self.num_labels) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Tuple: '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> List[Any]: """simple docstring""" A__ = LongformerModel.from_pretrained(lowercase_ ) A__ = LightningModel(lowercase_ ) A__ = torch.load(lowercase_ , map_location=torch.device('''cpu''' ) ) lightning_model.load_state_dict(ckpt['''state_dict'''] ) # init longformer question answering model A__ = LongformerForQuestionAnswering.from_pretrained(lowercase_ ) # transfer weights longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() ) longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() ) longformer_for_qa.eval() # save model longformer_for_qa.save_pretrained(lowercase_ ) print(f"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" ) if __name__ == "__main__": _lowerCamelCase : str = argparse.ArgumentParser() # Required parameters parser.add_argument( """--longformer_model""", default=None, type=str, required=True, help="""model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.""", ) parser.add_argument( """--longformer_question_answering_ckpt_path""", default=None, type=str, required=True, help="""Path the official PyTorch Lightning Checkpoint.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) _lowerCamelCase : Dict = parser.parse_args() convert_longformer_qa_checkpoint_to_pytorch( args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path )
87
from __future__ import annotations import requests def SCREAMING_SNAKE_CASE ( lowercase_ ) -> dict: """simple docstring""" A__ = f"""https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty""" return requests.get(lowercase_ ).json() def SCREAMING_SNAKE_CASE ( lowercase_ = 10 ) -> list[dict]: """simple docstring""" A__ = '''https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty''' A__ = requests.get(lowercase_ ).json()[:max_stories] return [get_hackernews_story(lowercase_ ) for story_id in story_ids] def SCREAMING_SNAKE_CASE ( lowercase_ = 10 ) -> str: """simple docstring""" A__ = hackernews_top_stories(lowercase_ ) return "\n".join('''* [{title}]({url})'''.format(**lowercase_ ) for story in stories ) if __name__ == "__main__": print(hackernews_top_stories_as_markdown())
87
1