Dataset schema (one row per sample):

    column                     type     range
    code                       string   86 - 54.5k characters
    code_codestyle             int64    0 - 371
    style_context              string   87 - 49.2k characters
    style_context_codestyle    int64    0 - 349
    label                      int64    0 - 1
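Each row pairs a code snippet with a style_context snippet; code_codestyle and style_context_codestyle are integer style ids, and in the rows below label is 1 exactly when the two style ids match. As a minimal sketch, this is how such a dataset could be loaded and that invariant checked with the Hugging Face datasets library; the dataset id "user/codestyle-pairs" is a hypothetical placeholder, not a real Hub id:

    # Hedged sketch: load the (hypothetically named) dataset and sanity-check
    # that `label` marks whether the two snippets share a style id.
    from datasets import load_dataset

    ds = load_dataset("user/codestyle-pairs", split="train")  # placeholder id

    for row in ds.select(range(10)):
        expected = int(row["code_codestyle"] == row["style_context_codestyle"])
        assert row["label"] == expected
        print(len(row["code"]), row["code_codestyle"], row["label"])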
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 29
import logging

from transformers import PretrainedConfig

logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}


class BertAbsConfig(PretrainedConfig):
    """Configuration class for the BertAbs summarization model."""

    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
style_context_codestyle: 235
label: 0
def nand_gate(input_1: int, input_2: int) -> int:
    """NAND gate: returns 0 only when both inputs are 1."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0


if __name__ == "__main__":
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
code_codestyle: 357
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Newton-Laplace formula: c = sqrt(bulk_modulus / density)."""
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
style_context_codestyle: 249
label: 0
"""simple docstring""" import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class _UpperCamelCase : '''simple docstring''' def __init__( self , __a , __a=2 , __a=8 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=16 , __a=5 , __a=2 , __a=36 , __a="gelu" , __a=0.0 , __a=0.0 , __a=5_12 , __a=16 , __a=2 , __a=0.0_2 , __a=3 , __a=4 , __a=None , ): __lowerCAmelCase = parent __lowerCAmelCase = batch_size __lowerCAmelCase = seq_length __lowerCAmelCase = is_training __lowerCAmelCase = use_input_mask __lowerCAmelCase = use_token_type_ids __lowerCAmelCase = use_labels __lowerCAmelCase = vocab_size __lowerCAmelCase = hidden_size __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = intermediate_size __lowerCAmelCase = hidden_act __lowerCAmelCase = hidden_dropout_prob __lowerCAmelCase = attention_probs_dropout_prob __lowerCAmelCase = max_position_embeddings __lowerCAmelCase = type_vocab_size __lowerCAmelCase = type_sequence_label_size __lowerCAmelCase = initializer_range __lowerCAmelCase = num_labels __lowerCAmelCase = num_choices __lowerCAmelCase = scope def snake_case ( self ): __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCAmelCase = None if self.use_input_mask: __lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCAmelCase = None if self.use_token_type_ids: __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCAmelCase = None __lowerCAmelCase = None __lowerCAmelCase = None if self.use_labels: __lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) __lowerCAmelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def snake_case ( self ): return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , ) def snake_case ( self ): __lowerCAmelCase = self.get_config() __lowerCAmelCase = 3_00 return config def snake_case ( self ): ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ) = self.prepare_config_and_inputs() __lowerCAmelCase = True __lowerCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, 
input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def snake_case ( self , __a , __a , __a , __a , __a , __a , __a ): __lowerCAmelCase = MraModel(config=__a ) model.to(__a ) model.eval() __lowerCAmelCase = model(__a , attention_mask=__a , token_type_ids=__a ) __lowerCAmelCase = model(__a , token_type_ids=__a ) __lowerCAmelCase = model(__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case ( self , __a , __a , __a , __a , __a , __a , __a , __a , __a , ): __lowerCAmelCase = True __lowerCAmelCase = MraModel(__a ) model.to(__a ) model.eval() __lowerCAmelCase = model( __a , attention_mask=__a , token_type_ids=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , ) __lowerCAmelCase = model( __a , attention_mask=__a , token_type_ids=__a , encoder_hidden_states=__a , ) __lowerCAmelCase = model(__a , attention_mask=__a , token_type_ids=__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case ( self , __a , __a , __a , __a , __a , __a , __a ): __lowerCAmelCase = MraForMaskedLM(config=__a ) model.to(__a ) model.eval() __lowerCAmelCase = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case ( self , __a , __a , __a , __a , __a , __a , __a ): __lowerCAmelCase = MraForQuestionAnswering(config=__a ) model.to(__a ) model.eval() __lowerCAmelCase = model( __a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def snake_case ( self , __a , __a , __a , __a , __a , __a , __a ): __lowerCAmelCase = self.num_labels __lowerCAmelCase = MraForSequenceClassification(__a ) model.to(__a ) model.eval() __lowerCAmelCase = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def snake_case ( self , __a , __a , __a , __a , __a , __a , __a ): __lowerCAmelCase = self.num_labels __lowerCAmelCase = MraForTokenClassification(config=__a ) model.to(__a ) model.eval() __lowerCAmelCase = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def snake_case ( self , __a , __a , __a , __a , __a , __a , __a ): __lowerCAmelCase = self.num_choices __lowerCAmelCase = MraForMultipleChoice(config=__a ) model.to(__a ) model.eval() __lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCAmelCase = model( __a , attention_mask=__a , token_type_ids=__a , labels=__a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def snake_case ( self ): __lowerCAmelCase = self.prepare_config_and_inputs() ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ) = config_and_inputs __lowerCAmelCase = 
{"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class _UpperCamelCase ( lowerCAmelCase__ ,unittest.TestCase ): '''simple docstring''' __UpperCAmelCase : List[Any] =( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) __UpperCAmelCase : str =False __UpperCAmelCase : int =False __UpperCAmelCase : Optional[Any] =False __UpperCAmelCase : Tuple =False __UpperCAmelCase : List[Any] =() def snake_case ( self ): __lowerCAmelCase = MraModelTester(self ) __lowerCAmelCase = ConfigTester(self , config_class=__a , hidden_size=37 ) def snake_case ( self ): self.config_tester.run_common_tests() def snake_case ( self ): __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def snake_case ( self ): __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __lowerCAmelCase = type self.model_tester.create_and_check_model(*__a ) def snake_case ( self ): __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__a ) def snake_case ( self ): __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__a ) def snake_case ( self ): __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__a ) def snake_case ( self ): __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__a ) def snake_case ( self ): __lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__a ) @slow def snake_case ( self ): for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase = MraModel.from_pretrained(__a ) self.assertIsNotNone(__a ) @unittest.skip(reason="MRA does not output attentions" ) def snake_case ( self ): return @require_torch class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' @slow def snake_case ( self ): __lowerCAmelCase = MraModel.from_pretrained("uw-madison/mra-base-512-4" ) __lowerCAmelCase = torch.arange(2_56 ).unsqueeze(0 ) with torch.no_grad(): __lowerCAmelCase = model(__a )[0] __lowerCAmelCase = torch.Size((1, 2_56, 7_68) ) self.assertEqual(output.shape , __a ) __lowerCAmelCase = torch.tensor( [[[-0.0_1_4_0, 0.0_8_3_0, -0.0_3_8_1], [0.1_5_4_6, 0.1_4_0_2, 0.0_2_2_0], [0.1_1_6_2, 0.0_8_5_1, 0.0_1_6_5]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) ) @slow def snake_case ( self ): __lowerCAmelCase = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4" ) __lowerCAmelCase = torch.arange(2_56 ).unsqueeze(0 ) with torch.no_grad(): __lowerCAmelCase = model(__a )[0] __lowerCAmelCase = 5_02_65 __lowerCAmelCase = torch.Size((1, 2_56, vocab_size) ) self.assertEqual(output.shape , __a ) __lowerCAmelCase = torch.tensor( [[[9.2_5_9_5, -3.6_0_3_8, 1_1.8_8_1_9], [9.3_8_6_9, -3.2_6_9_3, 1_1.0_9_5_6], [1_1.8_5_2_4, -3.4_9_3_8, 1_3.1_2_1_0]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) ) @slow def snake_case ( self ): __lowerCAmelCase = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3" ) __lowerCAmelCase = torch.arange(40_96 ).unsqueeze(0 ) with torch.no_grad(): __lowerCAmelCase = 
model(__a )[0] __lowerCAmelCase = 5_02_65 __lowerCAmelCase = torch.Size((1, 40_96, vocab_size) ) self.assertEqual(output.shape , __a ) __lowerCAmelCase = torch.tensor( [[[5.4_7_8_9, -2.3_5_6_4, 7.5_0_6_4], [7.9_0_6_7, -1.3_3_6_9, 9.9_6_6_8], [9.0_7_1_2, -1.8_1_0_6, 7.0_3_8_0]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) )
code_codestyle: 57
"""simple docstring""" import argparse import os import re import packaging.version A : Any = "examples/" A : Optional[Any] = { "examples": (re.compile(R"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"), "init": (re.compile(R"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"), "setup": (re.compile(R"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","), "doc": (re.compile(R"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"), } A : Optional[int] = { "init": "src/transformers/__init__.py", "setup": "setup.py", } A : List[Any] = "README.md" def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' with open(_UpperCamelCase , "r" , encoding="utf-8" , newline="\n" ) as f: __lowerCAmelCase = f.read() __lowerCAmelCase , __lowerCAmelCase = REPLACE_PATTERNS[pattern] __lowerCAmelCase = replace.replace("VERSION" , _UpperCamelCase ) __lowerCAmelCase = re_pattern.sub(_UpperCamelCase , _UpperCamelCase ) with open(_UpperCamelCase , "w" , encoding="utf-8" , newline="\n" ) as f: f.write(_UpperCamelCase ) def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' for folder, directories, fnames in os.walk(_UpperCamelCase ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove("research_projects" ) if "legacy" in directories: directories.remove("legacy" ) for fname in fnames: if fname.endswith(".py" ): update_version_in_file(os.path.join(_UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase , pattern="examples" ) def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase=False ): '''simple docstring''' for pattern, fname in REPLACE_FILES.items(): update_version_in_file(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) if not patch: update_version_in_examples(_UpperCamelCase ) def _lowerCamelCase ( ): '''simple docstring''' __lowerCAmelCase = "🤗 Transformers currently provides the following architectures" __lowerCAmelCase = "1. Want to contribute a new model?" with open(_UpperCamelCase , "r" , encoding="utf-8" , newline="\n" ) as f: __lowerCAmelCase = f.readlines() # Find the start of the list. __lowerCAmelCase = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 __lowerCAmelCase = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith("1." ): __lowerCAmelCase = lines[index].replace( "https://huggingface.co/docs/transformers/main/model_doc" , "https://huggingface.co/docs/transformers/model_doc" , ) index += 1 with open(_UpperCamelCase , "w" , encoding="utf-8" , newline="\n" ) as f: f.writelines(_UpperCamelCase ) def _lowerCamelCase ( ): '''simple docstring''' with open(REPLACE_FILES["init"] , "r" ) as f: __lowerCAmelCase = f.read() __lowerCAmelCase = REPLACE_PATTERNS["init"][0].search(_UpperCamelCase ).groups()[0] return packaging.version.parse(_UpperCamelCase ) def _lowerCamelCase ( _UpperCamelCase=False ): '''simple docstring''' __lowerCAmelCase = get_version() if patch and default_version.is_devrelease: raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" 
) if default_version.is_devrelease: __lowerCAmelCase = default_version.base_version elif patch: __lowerCAmelCase = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}" else: __lowerCAmelCase = f"{default_version.major}.{default_version.minor + 1}.0" # Now let's ask nicely if that's the right one. __lowerCAmelCase = input(f"Which version are you releasing? [{default_version}]" ) if len(_UpperCamelCase ) == 0: __lowerCAmelCase = default_version print(f"Updating version to {version}." ) global_version_update(_UpperCamelCase , patch=_UpperCamelCase ) if not patch: print("Cleaning main README, don't forget to run `make fix-copies`." ) clean_main_ref_in_model_list() def _lowerCamelCase ( ): '''simple docstring''' __lowerCAmelCase = get_version() __lowerCAmelCase = f"{current_version.major}.{current_version.minor + 1}.0.dev0" __lowerCAmelCase = current_version.base_version # Check with the user we got that right. __lowerCAmelCase = input(f"Which version are we developing now? [{dev_version}]" ) if len(_UpperCamelCase ) == 0: __lowerCAmelCase = dev_version print(f"Updating version to {version}." ) global_version_update(_UpperCamelCase ) print("Cleaning main README, don't forget to run `make fix-copies`." ) clean_main_ref_in_model_list() if __name__ == "__main__": A : Union[str, Any] = argparse.ArgumentParser() parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.") parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.") A : Dict = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print("Nothing to do after a patch :-)") else: post_release_work()
style_context_codestyle: 57
label: 1
'''simple docstring''' from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo UpperCAmelCase = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n' UpperCAmelCase = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n' UpperCAmelCase = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... 
\'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __snake_case( datasets.Metric ): '''simple docstring''' def __snake_case ( self ) -> MetricInfo: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ), """references""": datasets.Sequence( datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ), } ) , ) def __snake_case ( self , A_ , A_ , A_ = 1 , A_ = 4 , ) -> Dict[str, float]: return { "google_bleu": gleu_score.corpus_gleu( list_of_references=A_ , hypotheses=A_ , min_len=A_ , max_len=A_ ) }
code_codestyle: 351
'''simple docstring''' import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer from ...utils import logging UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = '▁' UpperCAmelCase = { 'vocab_file': 'vocab.json', 'spm_file': 'sentencepiece.bpe.model', 'tokenizer_config_file': 'tokenizer_config.json', } UpperCAmelCase = { 'vocab_file': { 'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json', 'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json', }, 'spm_file': { 'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model', 'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model', }, 'tokenizer_config_file': { 'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json', 'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json', }, } UpperCAmelCase = { 'facebook/m2m100_418M': 1024, } # fmt: off UpperCAmelCase = { 'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'], 'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de'] } class __snake_case( _lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Optional[int] = VOCAB_FILES_NAMES UpperCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase : int = ["input_ids", "attention_mask"] UpperCAmelCase : List[int] = [] UpperCAmelCase : List[int] = [] def __init__( self , A_ , A_ , A_=None , A_=None , A_="<s>" , A_="</s>" , A_="</s>" , A_="<pad>" , A_="<unk>" , A_="m2m100" , A_ = None , A_=8 , **A_ , ) -> None: lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs lowerCAmelCase = language_codes lowerCAmelCase = FAIRSEQ_LANGUAGE_CODES[language_codes] lowerCAmelCase = {lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code} lowerCAmelCase = kwargs.get("""additional_special_tokens""" , [] ) kwargs["additional_special_tokens"] += [ self.get_lang_token(A_ ) for lang_code in fairseq_language_code if self.get_lang_token(A_ ) not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=A_ , tgt_lang=A_ , bos_token=A_ , eos_token=A_ , sep_token=A_ , unk_token=A_ , pad_token=A_ , language_codes=A_ , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=A_ , **A_ , ) lowerCAmelCase = vocab_file lowerCAmelCase = load_json(A_ ) lowerCAmelCase = {v: k for k, v in self.encoder.items()} lowerCAmelCase = spm_file lowerCAmelCase = load_spm(A_ , self.sp_model_kwargs ) lowerCAmelCase = len(self.encoder ) lowerCAmelCase = { self.get_lang_token(A_ ): self.encoder_size + i for i, lang_code in enumerate(A_ ) } lowerCAmelCase = {lang_code: self.encoder_size + 
i for i, lang_code in enumerate(A_ )} lowerCAmelCase = {v: k for k, v in self.lang_token_to_id.items()} lowerCAmelCase = src_lang if src_lang is not None else """en""" lowerCAmelCase = tgt_lang lowerCAmelCase = self.get_lang_id(self._src_lang ) self.set_src_lang_special_tokens(self._src_lang ) lowerCAmelCase = num_madeup_words @property def __snake_case ( self ) -> int: return len(self.encoder ) + len(self.lang_token_to_id ) @property def __snake_case ( self ) -> str: return self._src_lang @src_lang.setter def __snake_case ( self , A_ ) -> None: lowerCAmelCase = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __snake_case ( self , A_ ) -> List[str]: return self.sp_model.encode(A_ , out_type=A_ ) def __snake_case ( self , A_ ) -> Any: if token in self.lang_token_to_id: return self.lang_token_to_id[token] return self.encoder.get(A_ , self.encoder[self.unk_token] ) def __snake_case ( self , A_ ) -> str: if index in self.id_to_lang_token: return self.id_to_lang_token[index] return self.decoder.get(A_ , self.unk_token ) def __snake_case ( self , A_ ) -> List[str]: lowerCAmelCase = [] lowerCAmelCase = """""" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(A_ ) + token lowerCAmelCase = [] else: current_sub_tokens.append(A_ ) out_string += self.sp_model.decode(A_ ) return out_string.strip() def __snake_case ( self , A_ , A_ = None , A_ = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ ) lowerCAmelCase = [1] * len(self.prefix_tokens ) lowerCAmelCase = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(A_ )) + suffix_ones return prefix_ones + ([0] * len(A_ )) + ([0] * len(A_ )) + suffix_ones def __snake_case ( self , A_ , A_ = None ) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def __snake_case ( self ) -> Dict: lowerCAmelCase = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> Dict: lowerCAmelCase = self.__dict__.copy() lowerCAmelCase = None return state def __setstate__( self , A_ ) -> None: lowerCAmelCase = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): lowerCAmelCase = {} lowerCAmelCase = load_spm(self.spm_file , self.sp_model_kwargs ) def __snake_case ( self , A_ , A_ = None ) -> Tuple[str]: lowerCAmelCase = Path(A_ ) if not save_dir.is_dir(): raise OSError(f'{save_directory} should be a directory' ) lowerCAmelCase = save_dir / ( (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""] ) lowerCAmelCase = save_dir / ( (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""] ) save_json(self.encoder , A_ ) if os.path.abspath(self.spm_file ) != os.path.abspath(A_ ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , A_ ) elif not os.path.isfile(self.spm_file ): with open(A_ , """wb""" ) as fi: lowerCAmelCase = self.sp_model.serialized_model_proto() fi.write(A_ ) return (str(A_ ), str(A_ )) def __snake_case ( self , A_ , A_ = "en" , A_ = None , A_ = "ro" , **A_ , ) -> BatchEncoding: 
lowerCAmelCase = src_lang lowerCAmelCase = tgt_lang self.set_src_lang_special_tokens(self.src_lang ) return super().prepare_seqaseq_batch(A_ , A_ , **A_ ) def __snake_case ( self , A_ , A_ , A_ , **A_ ) -> str: if src_lang is None or tgt_lang is None: raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" ) lowerCAmelCase = src_lang lowerCAmelCase = self(A_ , add_special_tokens=A_ , **A_ ) lowerCAmelCase = self.get_lang_id(A_ ) lowerCAmelCase = tgt_lang_id return inputs def __snake_case ( self ) -> Any: self.set_src_lang_special_tokens(self.src_lang ) def __snake_case ( self ) -> Optional[int]: self.set_tgt_lang_special_tokens(self.tgt_lang ) def __snake_case ( self , A_ ) -> None: lowerCAmelCase = self.get_lang_token(A_ ) lowerCAmelCase = self.lang_token_to_id[lang_token] lowerCAmelCase = [self.cur_lang_id] lowerCAmelCase = [self.eos_token_id] def __snake_case ( self , A_ ) -> None: lowerCAmelCase = self.get_lang_token(A_ ) lowerCAmelCase = self.lang_token_to_id[lang_token] lowerCAmelCase = [self.cur_lang_id] lowerCAmelCase = [self.eos_token_id] def __snake_case ( self , A_ ) -> str: return self.lang_code_to_token[lang] def __snake_case ( self , A_ ) -> int: lowerCAmelCase = self.get_lang_token(A_ ) return self.lang_token_to_id[lang_token] def _snake_case ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor: """simple docstring""" lowerCAmelCase = sentencepiece.SentencePieceProcessor(**_SCREAMING_SNAKE_CASE ) spm.Load(str(_SCREAMING_SNAKE_CASE ) ) return spm def _snake_case ( _SCREAMING_SNAKE_CASE : str ) -> Union[Dict, List]: """simple docstring""" with open(_SCREAMING_SNAKE_CASE , """r""" ) as f: return json.load(_SCREAMING_SNAKE_CASE ) def _snake_case ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : str ) -> None: """simple docstring""" with open(_SCREAMING_SNAKE_CASE , """w""" ) as f: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , indent=2 )
style_context_codestyle: 187
label: 0
def sylvester(number: int) -> int:
    """Return the nth term of Sylvester's sequence, computed recursively."""
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    if number < 1:
        raise ValueError(f"The input value of [n={number}] has to be > 0")

    num = sylvester(number - 1)
    lower = num - 1
    upper = num
    return lower * upper + 1


if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
code_codestyle: 79
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available

_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
style_context_codestyle: 251
label: 0
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging lowerCamelCase : Dict =logging.get_logger(__name__) lowerCamelCase : Optional[Any] ={ '''Helsinki-NLP/opus-mt-en-de''': '''https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json''', # See all Marian models at https://huggingface.co/models?filter=marian } class __a ( __SCREAMING_SNAKE_CASE ): _lowerCAmelCase : List[str] = 'marian' _lowerCAmelCase : Any = ['past_key_values'] _lowerCAmelCase : int = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE : Dict=5_81_01 , SCREAMING_SNAKE_CASE : Optional[Any]=None , SCREAMING_SNAKE_CASE : Optional[int]=10_24 , SCREAMING_SNAKE_CASE : Optional[int]=12 , SCREAMING_SNAKE_CASE : str=40_96 , SCREAMING_SNAKE_CASE : Dict=16 , SCREAMING_SNAKE_CASE : Tuple=12 , SCREAMING_SNAKE_CASE : List[Any]=40_96 , SCREAMING_SNAKE_CASE : Union[str, Any]=16 , SCREAMING_SNAKE_CASE : Optional[int]=0.0 , SCREAMING_SNAKE_CASE : int=0.0 , SCREAMING_SNAKE_CASE : Optional[int]=True , SCREAMING_SNAKE_CASE : int=True , SCREAMING_SNAKE_CASE : str="gelu" , SCREAMING_SNAKE_CASE : Optional[int]=10_24 , SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE : int=0.0 , SCREAMING_SNAKE_CASE : Dict=0.0 , SCREAMING_SNAKE_CASE : Dict=0.0_2 , SCREAMING_SNAKE_CASE : Any=5_81_00 , SCREAMING_SNAKE_CASE : Optional[Any]=False , SCREAMING_SNAKE_CASE : Union[str, Any]=5_81_00 , SCREAMING_SNAKE_CASE : Optional[int]=0 , SCREAMING_SNAKE_CASE : List[str]=0 , SCREAMING_SNAKE_CASE : List[Any]=True , **SCREAMING_SNAKE_CASE : Optional[Any] , ): '''simple docstring''' UpperCamelCase__ : Optional[int] = vocab_size UpperCamelCase__ : int = decoder_vocab_size or vocab_size UpperCamelCase__ : Optional[Any] = max_position_embeddings UpperCamelCase__ : Dict = d_model UpperCamelCase__ : Optional[int] = encoder_ffn_dim UpperCamelCase__ : Tuple = encoder_layers UpperCamelCase__ : Union[str, Any] = encoder_attention_heads UpperCamelCase__ : Tuple = decoder_ffn_dim UpperCamelCase__ : Optional[Any] = decoder_layers UpperCamelCase__ : Optional[Any] = decoder_attention_heads UpperCamelCase__ : Any = dropout UpperCamelCase__ : Optional[int] = attention_dropout UpperCamelCase__ : Optional[Any] = activation_dropout UpperCamelCase__ : Dict = activation_function UpperCamelCase__ : str = init_std UpperCamelCase__ : Any = encoder_layerdrop UpperCamelCase__ : Tuple = decoder_layerdrop UpperCamelCase__ : Union[str, Any] = use_cache UpperCamelCase__ : List[str] = encoder_layers UpperCamelCase__ : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True UpperCamelCase__ : Dict = share_encoder_decoder_embeddings super().__init__( pad_token_id=_a , eos_token_id=_a , is_encoder_decoder=_a , decoder_start_token_id=_a , forced_eos_token_id=_a , **_a , ) class __a ( __SCREAMING_SNAKE_CASE ): @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs def __lowercase ( self : List[Any] ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: UpperCamelCase__ : int = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: 
UpperCamelCase__ : Dict = {0: "batch"} UpperCamelCase__ : str = {0: "batch", 1: "past_decoder_sequence + sequence"} else: UpperCamelCase__ : int = {0: "batch", 1: "decoder_sequence"} UpperCamelCase__ : Tuple = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(_a , direction="inputs" ) elif self.task == "causal-lm": # TODO: figure this case out. UpperCamelCase__ : Any = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: UpperCamelCase__ , UpperCamelCase__ : str = self.num_layers for i in range(_a ): UpperCamelCase__ : Any = {0: "batch", 2: "past_sequence + sequence"} UpperCamelCase__ : int = {0: "batch", 2: "past_sequence + sequence"} else: UpperCamelCase__ : List[str] = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs def __lowercase ( self : Optional[Any] ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: UpperCamelCase__ : Optional[int] = super().outputs else: UpperCamelCase__ : Union[str, Any] = super(_a , self ).outputs if self.use_past: UpperCamelCase__ , UpperCamelCase__ : Optional[Any] = self.num_layers for i in range(_a ): UpperCamelCase__ : List[Any] = {0: "batch", 2: "past_sequence + sequence"} UpperCamelCase__ : Optional[int] = {0: "batch", 2: "past_sequence + sequence"} return common_outputs def __lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple = -1 , SCREAMING_SNAKE_CASE : Optional[Any] = -1 , SCREAMING_SNAKE_CASE : Optional[int] = False , SCREAMING_SNAKE_CASE : List[str] = None , ): '''simple docstring''' UpperCamelCase__ : Dict = self._generate_dummy_inputs_for_encoder_and_decoder( _a , _a , _a , _a , _a ) # Generate decoder inputs UpperCamelCase__ : Tuple = seq_length if not self.use_past else 1 UpperCamelCase__ : str = self._generate_dummy_inputs_for_encoder_and_decoder( _a , _a , _a , _a , _a ) UpperCamelCase__ : Union[str, Any] = {F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()} UpperCamelCase__ : str = dict(**_a , **_a ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." 
) else: import torch UpperCamelCase__ , UpperCamelCase__ : Dict = common_inputs["input_ids"].shape UpperCamelCase__ : Optional[int] = common_inputs["decoder_input_ids"].shape[1] UpperCamelCase__ , UpperCamelCase__ : Union[str, Any] = self.num_attention_heads UpperCamelCase__ : Dict = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) UpperCamelCase__ : Union[str, Any] = decoder_seq_length + 3 UpperCamelCase__ : Tuple = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) UpperCamelCase__ : Dict = torch.cat( [common_inputs["decoder_attention_mask"], torch.ones(_a , _a )] , dim=1 ) UpperCamelCase__ : Any = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered UpperCamelCase__ , UpperCamelCase__ : Tuple = self.num_layers UpperCamelCase__ : Any = min(_a , _a ) UpperCamelCase__ : Optional[int] = max(_a , _a ) - min_num_layers UpperCamelCase__ : List[str] = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(_a ): common_inputs["past_key_values"].append( ( torch.zeros(_a ), torch.zeros(_a ), torch.zeros(_a ), torch.zeros(_a ), ) ) # TODO: test this. UpperCamelCase__ : Union[str, Any] = encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(_a , _a ): common_inputs["past_key_values"].append((torch.zeros(_a ), torch.zeros(_a )) ) return common_inputs def __lowercase ( self : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[Any] = -1 , SCREAMING_SNAKE_CASE : List[str] = -1 , SCREAMING_SNAKE_CASE : List[Any] = False , SCREAMING_SNAKE_CASE : Union[str, Any] = None , ): '''simple docstring''' UpperCamelCase__ : Optional[int] = self._generate_dummy_inputs_for_encoder_and_decoder( _a , _a , _a , _a , _a ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." 
) else: import torch UpperCamelCase__ , UpperCamelCase__ : int = common_inputs["input_ids"].shape # Not using the same length for past_key_values UpperCamelCase__ : Any = seqlen + 2 UpperCamelCase__ , UpperCamelCase__ : str = self.num_layers UpperCamelCase__ , UpperCamelCase__ : Dict = self.num_attention_heads UpperCamelCase__ : List[str] = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) UpperCamelCase__ : Optional[Any] = common_inputs["attention_mask"].dtype UpperCamelCase__ : str = torch.cat( [common_inputs["attention_mask"], torch.ones(_a , _a , dtype=_a )] , dim=1 ) UpperCamelCase__ : Optional[Any] = [ (torch.zeros(_a ), torch.zeros(_a )) for _ in range(_a ) ] return common_inputs def __lowercase ( self : List[str] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : int = -1 , SCREAMING_SNAKE_CASE : Union[str, Any] = -1 , SCREAMING_SNAKE_CASE : int = False , SCREAMING_SNAKE_CASE : Tuple = None , ): '''simple docstring''' UpperCamelCase__ : str = compute_effective_axis_dimension( _a , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX UpperCamelCase__ : List[Any] = tokenizer.num_special_tokens_to_add(_a ) UpperCamelCase__ : int = compute_effective_axis_dimension( _a , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_a ) # Generate dummy inputs according to compute batch and sequence UpperCamelCase__ : Union[str, Any] = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size UpperCamelCase__ : List[Any] = dict(tokenizer(_a , return_tensors=_a ) ) return common_inputs def __lowercase ( self : int , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[int] = -1 , SCREAMING_SNAKE_CASE : Optional[Any] = -1 , SCREAMING_SNAKE_CASE : List[str] = False , SCREAMING_SNAKE_CASE : str = None , ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: UpperCamelCase__ : Any = self._generate_dummy_inputs_for_default_and_seqaseq_lm( _a , batch_size=_a , seq_length=_a , is_pair=_a , framework=_a ) else: UpperCamelCase__ : Optional[Any] = self._generate_dummy_inputs_for_causal_lm( _a , batch_size=_a , seq_length=_a , is_pair=_a , framework=_a ) return common_inputs def __lowercase ( self : Any , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Dict ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: UpperCamelCase__ : str = super()._flatten_past_key_values_(_a , _a , _a , _a ) else: UpperCamelCase__ : Any = super(_a , self )._flatten_past_key_values_( _a , _a , _a , _a ) @property def __lowercase ( self : Tuple ): '''simple docstring''' return 1e-4
code_codestyle: 361
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
style_context_codestyle: 196
label: 0
'''simple docstring''' import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features _SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE : List[str] = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) _SCREAMING_SNAKE_CASE : Tuple = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class _snake_case : lowerCAmelCase_ : str = field( default=lowercase_ , metadata={"help": "Model type selected in the list: " + ", ".join(lowercase_ )} ) lowerCAmelCase_ : str = field( default=lowercase_ , metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."} ) lowerCAmelCase_ : int = field( default=128 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) lowerCAmelCase_ : int = field( default=128 , metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , ) lowerCAmelCase_ : int = field( default=64 , metadata={ "help": ( "The maximum number of tokens for the question. Questions longer than this will " "be truncated to this length." ) } , ) lowerCAmelCase_ : int = field( default=30 , metadata={ "help": ( "The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another." 
) } , ) lowerCAmelCase_ : bool = field( default=lowercase_ , metadata={"help": "Overwrite the cached training and evaluation sets"} ) lowerCAmelCase_ : bool = field( default=lowercase_ , metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."} ) lowerCAmelCase_ : float = field( default=0.0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} ) lowerCAmelCase_ : int = field( default=20 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} ) lowerCAmelCase_ : int = field( default=0 , metadata={ "help": ( "language id of input for language-specific xlm models (see" " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)" ) } , ) lowerCAmelCase_ : int = field(default=1 , metadata={"help": "multiple threads for converting example to features"} ) class _snake_case ( lowercase_ ): lowerCAmelCase_ : int = "train" lowerCAmelCase_ : Tuple = "dev" class _snake_case ( lowercase_ ): lowerCAmelCase_ : SquadDataTrainingArguments lowerCAmelCase_ : List[SquadFeatures] lowerCAmelCase_ : Split lowerCAmelCase_ : bool def __init__( self , a__ , a__ , a__ = None , a__ = Split.train , a__ = False , a__ = None , a__ = "pt" , ) -> Any: '''simple docstring''' snake_case_ = args snake_case_ = is_language_sensitive snake_case_ = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(a__ , a__ ): try: snake_case_ = Split[mode] except KeyError: raise KeyError("mode is not a valid split name" ) snake_case_ = mode # Load data features from cache or dataset file snake_case_ = "v2" if args.version_2_with_negative else "v1" snake_case_ = os.path.join( cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}' , ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. snake_case_ = cached_features_file + ".lock" with FileLock(a__ ): if os.path.exists(a__ ) and not args.overwrite_cache: snake_case_ = time.time() snake_case_ = torch.load(a__ ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. snake_case_ = self.old_features["features"] snake_case_ = self.old_features.get("dataset" , a__ ) snake_case_ = self.old_features.get("examples" , a__ ) logger.info( F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( F'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in' " future run" ) else: if mode == Split.dev: snake_case_ = self.processor.get_dev_examples(args.data_dir ) else: snake_case_ = self.processor.get_train_examples(args.data_dir ) snake_case_ , snake_case_ = squad_convert_examples_to_features( examples=self.examples , tokenizer=a__ , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=a__ , ) snake_case_ = time.time() torch.save( {"features": self.features, "dataset": self.dataset, "examples": self.examples} , a__ , ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. 
logger.info( F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' ) def __len__( self ) -> str: '''simple docstring''' return len(self.features ) def __getitem__( self , a__ ) -> Dict[str, torch.Tensor]: '''simple docstring''' snake_case_ = self.features[i] snake_case_ = torch.tensor(feature.input_ids , dtype=torch.long ) snake_case_ = torch.tensor(feature.attention_mask , dtype=torch.long ) snake_case_ = torch.tensor(feature.token_type_ids , dtype=torch.long ) snake_case_ = torch.tensor(feature.cls_index , dtype=torch.long ) snake_case_ = torch.tensor(feature.p_mask , dtype=torch.float ) snake_case_ = torch.tensor(feature.is_impossible , dtype=torch.float ) snake_case_ = { "input_ids": input_ids, "attention_mask": attention_mask, "token_type_ids": token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({"cls_index": cls_index, "p_mask": p_mask} ) if self.args.version_2_with_negative: inputs.update({"is_impossible": is_impossible} ) if self.is_language_sensitive: inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: snake_case_ = torch.tensor(feature.start_position , dtype=torch.long ) snake_case_ = torch.tensor(feature.end_position , dtype=torch.long ) inputs.update({"start_positions": start_positions, "end_positions": end_positions} ) return inputs
85
"""simple docstring""" def lowercase ( lowerCAmelCase__ : list ) -> bool: if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''' ) if len(lowerCAmelCase__ ) == 0: raise ValueError('''Input list must be a non empty list''' ) if len(lowerCAmelCase__ ) == 1: return True __a = series[1] - series[0] for index in range(len(lowerCAmelCase__ ) - 1 ): if series[index + 1] - series[index] != common_diff: return False return True def lowercase ( lowerCAmelCase__ : list ) -> float: if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''' ) if len(lowerCAmelCase__ ) == 0: raise ValueError('''Input list must be a non empty list''' ) __a = 0 for val in series: answer += val return answer / len(lowerCAmelCase__ ) if __name__ == "__main__": import doctest doctest.testmod()
45
0
from __future__ import annotations


def all_unique(data: list[int]) -> bool:
    # A list has no duplicates exactly when its set has the same length.
    return len(set(data)) == len(data)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
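# Usage sketch (using the descriptive name chosen above for the obfuscated function):
print(all_unique([1, 2, 3]))  # True
print(all_unique([1, 2, 2]))  # False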
177
from __future__ import annotations

Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Check whether digit ``n`` may be placed at ``grid[row][column]``."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Find the next empty cell, scanning row by row."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Fill the grid in place by backtracking; return it, or None if unsolvable."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
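# Usage sketch: `sudoku` mutates its argument, so solve a deep copy to keep the puzzle intact.
import copy

solved = sudoku(copy.deepcopy(initial_grid))
if solved is not None:
    print_solution(solved)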
177
1
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setup(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
1
"""simple docstring""" from abc import ABC, abstractmethod from argparse import ArgumentParser class UpperCAmelCase_ ( snake_case ): @staticmethod @abstractmethod def _lowerCamelCase ( UpperCamelCase_ ) -> Union[str, Any]: raise NotImplementedError() @abstractmethod def _lowerCamelCase ( self ) -> str: raise NotImplementedError()
249
0
import argparse import collections import json import os import re import string import sys import numpy as np __lowerCAmelCase = re.compile(r'\b(a|an|the)\b', re.UNICODE) __lowerCAmelCase = None def __SCREAMING_SNAKE_CASE ( ): _snake_case = argparse.ArgumentParser("""Official evaluation script for SQuAD version 2.0.""" ) parser.add_argument("""data_file""" , metavar="""data.json""" , help="""Input data JSON file.""" ) parser.add_argument("""pred_file""" , metavar="""pred.json""" , help="""Model predictions.""" ) parser.add_argument( """--out-file""" , """-o""" , metavar="""eval.json""" , help="""Write accuracy metrics to file (default is stdout).""" ) parser.add_argument( """--na-prob-file""" , """-n""" , metavar="""na_prob.json""" , help="""Model estimates of probability of no answer.""" ) parser.add_argument( """--na-prob-thresh""" , """-t""" , type=_SCREAMING_SNAKE_CASE , default=1.0 , help="""Predict \"\" if no-answer probability exceeds this (default = 1.0).""" , ) parser.add_argument( """--out-image-dir""" , """-p""" , metavar="""out_images""" , default=_SCREAMING_SNAKE_CASE , help="""Save precision-recall curves to directory.""" ) parser.add_argument("""--verbose""" , """-v""" , action="""store_true""" ) if len(sys.argv ) == 1: parser.print_help() sys.exit(1 ) return parser.parse_args() def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ): _snake_case = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: _snake_case = bool(qa["""answers"""]["""text"""] ) return qid_to_has_ans def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ): def remove_articles(_SCREAMING_SNAKE_CASE ): return ARTICLES_REGEX.sub(""" """ , _SCREAMING_SNAKE_CASE ) def white_space_fix(_SCREAMING_SNAKE_CASE ): return " ".join(text.split() ) def remove_punc(_SCREAMING_SNAKE_CASE ): _snake_case = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(_SCREAMING_SNAKE_CASE ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(_SCREAMING_SNAKE_CASE ) ) ) ) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ): if not s: return [] return normalize_answer(_SCREAMING_SNAKE_CASE ).split() def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): return int(normalize_answer(_SCREAMING_SNAKE_CASE ) == normalize_answer(_SCREAMING_SNAKE_CASE ) ) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _snake_case = get_tokens(_SCREAMING_SNAKE_CASE ) _snake_case = get_tokens(_SCREAMING_SNAKE_CASE ) _snake_case = collections.Counter(_SCREAMING_SNAKE_CASE ) & collections.Counter(_SCREAMING_SNAKE_CASE ) _snake_case = sum(common.values() ) if len(_SCREAMING_SNAKE_CASE ) == 0 or len(_SCREAMING_SNAKE_CASE ) == 0: # If either is no-answer, then F1 is 1 if they agree, 0 otherwise return int(gold_toks == pred_toks ) if num_same == 0: return 0 _snake_case = 1.0 * num_same / len(_SCREAMING_SNAKE_CASE ) _snake_case = 1.0 * num_same / len(_SCREAMING_SNAKE_CASE ) _snake_case = (2 * precision * recall) / (precision + recall) return fa def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _snake_case = {} _snake_case = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: _snake_case = qa["""id"""] _snake_case = [t for t in qa["""answers"""]["""text"""] if normalize_answer(_SCREAMING_SNAKE_CASE )] if not gold_answers: # For unanswerable questions, only correct answer is empty string _snake_case = [""""""] if qid not in preds: print(f"""Missing prediction for {qid}""" ) 
continue _snake_case = preds[qid] # Take max over all gold answers _snake_case = max(compute_exact(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for a in gold_answers ) _snake_case = max(compute_fa(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for a in gold_answers ) return exact_scores, fa_scores def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _snake_case = {} for qid, s in scores.items(): _snake_case = na_probs[qid] > na_prob_thresh if pred_na: _snake_case = float(not qid_to_has_ans[qid] ) else: _snake_case = s return new_scores def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ): if not qid_list: _snake_case = len(_SCREAMING_SNAKE_CASE ) return collections.OrderedDict( [ ("""exact""", 100.0 * sum(exact_scores.values() ) / total), ("""f1""", 100.0 * sum(fa_scores.values() ) / total), ("""total""", total), ] ) else: _snake_case = len(_SCREAMING_SNAKE_CASE ) return collections.OrderedDict( [ ("""exact""", 100.0 * sum(exact_scores[k] for k in qid_list ) / total), ("""f1""", 100.0 * sum(fa_scores[k] for k in qid_list ) / total), ("""total""", total), ] ) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): for k in new_eval: _snake_case = new_eval[k] def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): plt.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , color="""b""" , alpha=0.2 , where="""post""" ) plt.fill_between(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , step="""post""" , alpha=0.2 , color="""b""" ) plt.xlabel("""Recall""" ) plt.ylabel("""Precision""" ) plt.xlim([0.0, 1.05] ) plt.ylim([0.0, 1.05] ) plt.title(_SCREAMING_SNAKE_CASE ) plt.savefig(_SCREAMING_SNAKE_CASE ) plt.clf() def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ): _snake_case = sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : na_probs[k] ) _snake_case = 0.0 _snake_case = 1.0 _snake_case = 0.0 _snake_case = [1.0] _snake_case = [0.0] _snake_case = 0.0 for i, qid in enumerate(_SCREAMING_SNAKE_CASE ): if qid_to_has_ans[qid]: true_pos += scores[qid] _snake_case = true_pos / float(i + 1 ) _snake_case = true_pos / float(_SCREAMING_SNAKE_CASE ) if i == len(_SCREAMING_SNAKE_CASE ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]: # i.e., if we can put a threshold after this point avg_prec += cur_p * (cur_r - recalls[-1]) precisions.append(_SCREAMING_SNAKE_CASE ) recalls.append(_SCREAMING_SNAKE_CASE ) if out_image: plot_pr_curve(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return {"ap": 100.0 * avg_prec} def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if out_image_dir and not os.path.exists(_SCREAMING_SNAKE_CASE ): os.makedirs(_SCREAMING_SNAKE_CASE ) _snake_case = sum(1 for v in qid_to_has_ans.values() if v ) if num_true_pos == 0: return _snake_case = make_precision_recall_eval( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , out_image=os.path.join(_SCREAMING_SNAKE_CASE , """pr_exact.png""" ) , title="""Precision-Recall curve for Exact Match score""" , ) _snake_case = make_precision_recall_eval( 
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , out_image=os.path.join(_SCREAMING_SNAKE_CASE , """pr_f1.png""" ) , title="""Precision-Recall curve for F1 score""" , ) _snake_case = {k: float(_SCREAMING_SNAKE_CASE ) for k, v in qid_to_has_ans.items()} _snake_case = make_precision_recall_eval( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , out_image=os.path.join(_SCREAMING_SNAKE_CASE , """pr_oracle.png""" ) , title="""Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)""" , ) merge_eval(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """pr_exact""" ) merge_eval(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """pr_f1""" ) merge_eval(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """pr_oracle""" ) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if not qid_list: return _snake_case = [na_probs[k] for k in qid_list] _snake_case = np.ones_like(_SCREAMING_SNAKE_CASE ) / float(len(_SCREAMING_SNAKE_CASE ) ) plt.hist(_SCREAMING_SNAKE_CASE , weights=_SCREAMING_SNAKE_CASE , bins=20 , range=(0.0, 1.0) ) plt.xlabel("""Model probability of no-answer""" ) plt.ylabel("""Proportion of dataset""" ) plt.title(f"""Histogram of no-answer probability: {name}""" ) plt.savefig(os.path.join(_SCREAMING_SNAKE_CASE , f"""na_prob_hist_{name}.png""" ) ) plt.clf() def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _snake_case = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] ) _snake_case = num_no_ans _snake_case = cur_score _snake_case = 0.0 _snake_case = sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : na_probs[k] ) for i, qid in enumerate(_SCREAMING_SNAKE_CASE ): if qid not in scores: continue if qid_to_has_ans[qid]: _snake_case = scores[qid] else: if preds[qid]: _snake_case = -1 else: _snake_case = 0 cur_score += diff if cur_score > best_score: _snake_case = cur_score _snake_case = na_probs[qid] return 100.0 * best_score / len(_SCREAMING_SNAKE_CASE ), best_thresh def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _snake_case, _snake_case = find_best_thresh(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _snake_case, _snake_case = find_best_thresh(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _snake_case = best_exact _snake_case = exact_thresh _snake_case = best_fa _snake_case = fa_thresh def __SCREAMING_SNAKE_CASE ( ): with open(OPTS.data_file ) as f: _snake_case = json.load(_SCREAMING_SNAKE_CASE ) _snake_case = dataset_json["""data"""] with open(OPTS.pred_file ) as f: _snake_case = json.load(_SCREAMING_SNAKE_CASE ) if OPTS.na_prob_file: with open(OPTS.na_prob_file ) as f: _snake_case = json.load(_SCREAMING_SNAKE_CASE ) else: _snake_case = {k: 0.0 for k in preds} _snake_case = make_qid_to_has_ans(_SCREAMING_SNAKE_CASE ) # maps qid to True/False _snake_case = [k for k, v in qid_to_has_ans.items() if v] _snake_case = [k for k, v in qid_to_has_ans.items() if not v] _snake_case, _snake_case = get_raw_scores(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _snake_case = apply_no_ans_threshold(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , OPTS.na_prob_thresh ) _snake_case = 
apply_no_ans_threshold(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , OPTS.na_prob_thresh ) _snake_case = make_eval_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if has_ans_qids: _snake_case = make_eval_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , qid_list=_SCREAMING_SNAKE_CASE ) merge_eval(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """HasAns""" ) if no_ans_qids: _snake_case = make_eval_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , qid_list=_SCREAMING_SNAKE_CASE ) merge_eval(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """NoAns""" ) if OPTS.na_prob_file: find_all_best_thresh(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if OPTS.na_prob_file and OPTS.out_image_dir: run_precision_recall_analysis(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , OPTS.out_image_dir ) histogram_na_prob(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , OPTS.out_image_dir , """hasAns""" ) histogram_na_prob(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , OPTS.out_image_dir , """noAns""" ) if OPTS.out_file: with open(OPTS.out_file , """w""" ) as f: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: print(json.dumps(_SCREAMING_SNAKE_CASE , indent=2 ) ) if __name__ == "__main__": __lowerCAmelCase = parse_args() if OPTS.out_image_dir: import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt main()
370
'''simple docstring''' import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class _lowerCAmelCase ( __snake_case , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = DiTPipeline lowerCAmelCase_ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS lowerCAmelCase_ = PipelineTesterMixin.required_optional_params - { "latents", "num_images_per_prompt", "callback", "callback_steps", } lowerCAmelCase_ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS lowerCAmelCase_ = False def lowercase (self ) -> Union[str, Any]: torch.manual_seed(0 ) _snake_case = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=UpperCAmelCase , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=UpperCAmelCase , ) _snake_case = AutoencoderKL() _snake_case = DDIMScheduler() _snake_case = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler} return components def lowercase (self , UpperCAmelCase , UpperCAmelCase=0 ) -> List[str]: if str(UpperCAmelCase ).startswith("""mps""" ): _snake_case = torch.manual_seed(UpperCAmelCase ) else: _snake_case = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase ) _snake_case = { """class_labels""": [1], """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def lowercase (self ) -> Union[str, Any]: _snake_case = """cpu""" _snake_case = self.get_dummy_components() _snake_case = self.pipeline_class(**UpperCAmelCase ) pipe.to(UpperCAmelCase ) pipe.set_progress_bar_config(disable=UpperCAmelCase ) _snake_case = self.get_dummy_inputs(UpperCAmelCase ) _snake_case = pipe(**UpperCAmelCase ).images _snake_case = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) _snake_case = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] ) _snake_case = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(UpperCAmelCase , 1e-3 ) def lowercase (self ) -> List[str]: self._test_inference_batch_single_identical(relax_max_difference=UpperCAmelCase , expected_max_diff=1e-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def lowercase (self ) -> Union[str, Any]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @require_torch_gpu @slow class _lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def lowercase (self ) -> Tuple: super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase (self ) -> Any: _snake_case = torch.manual_seed(0 ) _snake_case = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" ) pipe.to("""cuda""" ) _snake_case = ["""vase""", """umbrella""", """white shark""", """white wolf"""] _snake_case = pipe.get_label_ids(UpperCAmelCase ) _snake_case = pipe(UpperCAmelCase , generator=UpperCAmelCase , 
num_inference_steps=40 , output_type="""np""" ).images for word, image in zip(UpperCAmelCase , UpperCAmelCase ): _snake_case = load_numpy( f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" ) assert np.abs((expected_image - image).max() ) < 1e-2 def lowercase (self ) -> Union[str, Any]: _snake_case = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" ) _snake_case = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to("""cuda""" ) _snake_case = ["""vase""", """umbrella"""] _snake_case = pipe.get_label_ids(UpperCAmelCase ) _snake_case = torch.manual_seed(0 ) _snake_case = pipe(UpperCAmelCase , generator=UpperCAmelCase , num_inference_steps=25 , output_type="""np""" ).images for word, image in zip(UpperCAmelCase , UpperCAmelCase ): _snake_case = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" f"""/dit/{word}_512.npy""" ) assert np.abs((expected_image - image).max() ) < 1e-1
270
0
import string


def decrypt(message: str) -> None:
    """Print the message decrypted with every possible Caesar shift."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
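# Brute-force sketch: "HELLO" shifted by 3 is "KHOOR", so the Key #3 line below
# prints the plaintext.
decrypt("KHOOR")  # the line for Key #3 reads "HELLO"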
297
import json import os import re import unittest from transformers import CodeGenTokenizer, CodeGenTokenizerFast from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = CodeGenTokenizer lowerCAmelCase_ = CodeGenTokenizerFast lowerCAmelCase_ = True lowerCAmelCase_ = {'''add_prefix_space''': True} lowerCAmelCase_ = False def snake_case__ ( self : List[Any] ): """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt snake_case_ = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", "<|endoftext|>", ] snake_case_ = dict(zip(__lowercase , range(len(__lowercase ) ) ) ) snake_case_ = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] snake_case_ = {"unk_token": "<unk>"} snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(__lowercase ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(__lowercase ) ) def snake_case__ ( self : Union[str, Any] , **__lowercase : List[str] ): """simple docstring""" kwargs.update(self.special_tokens_map ) return CodeGenTokenizer.from_pretrained(self.tmpdirname , **__lowercase ) def snake_case__ ( self : Optional[Any] , **__lowercase : Union[str, Any] ): """simple docstring""" kwargs.update(self.special_tokens_map ) return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **__lowercase ) def snake_case__ ( self : Optional[int] , __lowercase : List[str] ): """simple docstring""" snake_case_ = "lower newer" snake_case_ = "lower newer" return input_text, output_text def snake_case__ ( self : Optional[Any] ): """simple docstring""" snake_case_ = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) snake_case_ = "lower newer" snake_case_ = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"] snake_case_ = tokenizer.tokenize(__lowercase , add_prefix_space=__lowercase ) self.assertListEqual(__lowercase , __lowercase ) snake_case_ = tokens + [tokenizer.unk_token] snake_case_ = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ) , __lowercase ) def snake_case__ ( self : Optional[int] ): """simple docstring""" if not self.test_rust_tokenizer: return snake_case_ = self.get_tokenizer() snake_case_ = self.get_rust_tokenizer(add_prefix_space=__lowercase ) snake_case_ = "lower newer" # Testing tokenization snake_case_ = tokenizer.tokenize(__lowercase , add_prefix_space=__lowercase ) snake_case_ = rust_tokenizer.tokenize(__lowercase ) self.assertListEqual(__lowercase , __lowercase ) # Testing conversion to ids without special tokens snake_case_ = tokenizer.encode(__lowercase , add_special_tokens=__lowercase , add_prefix_space=__lowercase ) snake_case_ = rust_tokenizer.encode(__lowercase , add_special_tokens=__lowercase ) self.assertListEqual(__lowercase , __lowercase ) # Testing conversion to ids with special tokens snake_case_ = self.get_rust_tokenizer(add_prefix_space=__lowercase ) snake_case_ = tokenizer.encode(__lowercase , 
add_prefix_space=__lowercase ) snake_case_ = rust_tokenizer.encode(__lowercase ) self.assertListEqual(__lowercase , __lowercase ) # Testing the unknown token snake_case_ = tokens + [rust_tokenizer.unk_token] snake_case_ = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__lowercase ) , __lowercase ) def snake_case__ ( self : Any , *__lowercase : Union[str, Any] , **__lowercase : Tuple ): """simple docstring""" pass def snake_case__ ( self : int , __lowercase : str=15 ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): snake_case_ = self.rust_tokenizer_class.from_pretrained(__lowercase , **__lowercase ) # Simple input snake_case_ = "This is a simple input" snake_case_ = ["This is a simple input 1", "This is a simple input 2"] snake_case_ = ("This is a simple input", "This is a pair") snake_case_ = [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ] # Simple input tests self.assertRaises(__lowercase , tokenizer_r.encode , __lowercase , max_length=__lowercase , padding="max_length" ) # Simple input self.assertRaises(__lowercase , tokenizer_r.encode_plus , __lowercase , max_length=__lowercase , padding="max_length" ) # Simple input self.assertRaises( __lowercase , tokenizer_r.batch_encode_plus , __lowercase , max_length=__lowercase , padding="max_length" , ) # Pair input self.assertRaises(__lowercase , tokenizer_r.encode , __lowercase , max_length=__lowercase , padding="max_length" ) # Pair input self.assertRaises(__lowercase , tokenizer_r.encode_plus , __lowercase , max_length=__lowercase , padding="max_length" ) # Pair input self.assertRaises( __lowercase , tokenizer_r.batch_encode_plus , __lowercase , max_length=__lowercase , padding="max_length" , ) def snake_case__ ( self : str ): """simple docstring""" snake_case_ = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" ) # Simple input snake_case_ = "This is a simple input" snake_case_ = ["This is a simple input looooooooong", "This is a simple input"] snake_case_ = ("This is a simple input", "This is a pair") snake_case_ = [ ("This is a simple input loooooong", "This is a simple input"), ("This is a simple pair loooooong", "This is a simple pair"), ] snake_case_ = tokenizer.pad_token_id snake_case_ = tokenizer(__lowercase , padding="max_length" , max_length=30 , return_tensors="np" ) snake_case_ = tokenizer(__lowercase , padding=__lowercase , truncate=__lowercase , return_tensors="np" ) snake_case_ = tokenizer(*__lowercase , padding="max_length" , max_length=60 , return_tensors="np" ) snake_case_ = tokenizer(__lowercase , padding=__lowercase , truncate=__lowercase , return_tensors="np" ) # s # test single string max_length padding self.assertEqual(out_s["input_ids"].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s["input_ids"] ) self.assertTrue(0 in out_s["attention_mask"] ) # s2 # test automatic padding self.assertEqual(out_sa["input_ids"].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa["input_ids"][0] ) self.assertFalse(0 in out_sa["attention_mask"][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa["input_ids"][1] ) self.assertTrue(0 in out_sa["attention_mask"][1] ) # p # test single pair max_length padding self.assertEqual(out_p["input_ids"].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p["input_ids"] ) self.assertTrue(0 in 
out_p["attention_mask"] ) # p2 # test automatic padding pair self.assertEqual(out_pa["input_ids"].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa["input_ids"][0] ) self.assertFalse(0 in out_pa["attention_mask"][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa["input_ids"][1] ) self.assertTrue(0 in out_pa["attention_mask"][1] ) def snake_case__ ( self : Tuple ): """simple docstring""" snake_case_ = "$$$" snake_case_ = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=__lowercase , add_bos_token=__lowercase ) snake_case_ = "This is a simple input" snake_case_ = ["This is a simple input 1", "This is a simple input 2"] snake_case_ = tokenizer.bos_token_id snake_case_ = tokenizer(__lowercase ) snake_case_ = tokenizer(__lowercase ) self.assertEqual(out_s.input_ids[0] , __lowercase ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) snake_case_ = tokenizer.decode(out_s.input_ids ) snake_case_ = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , __lowercase ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) @slow def snake_case__ ( self : Tuple ): """simple docstring""" snake_case_ = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono" ) snake_case_ = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#" snake_case_ = "\nif len_a > len_b: result = a\nelse: result = b" snake_case_ = tokenizer.encode(__lowercase ) snake_case_ = ["^#", re.escape("<|endoftext|>" ), "^'''", "^\"\"\"", "\n\n\n"] snake_case_ = tokenizer.decode(__lowercase , truncate_before_pattern=__lowercase ) self.assertEqual(__lowercase , __lowercase ) def snake_case__ ( self : Dict ): """simple docstring""" pass
187
0
"""simple docstring""" # A Bipartite Graph is a graph whose vertices can be divided into two independent sets, # U and V such that every edge (u, v) either connects a vertex from U to V or a vertex # from V to U. In other words, for every edge (u, v), either u belongs to U and v to V, # or u belongs to V and v to U. We can also say that there is no edge that connects # vertices of same set. def lowerCamelCase_( _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : Union[str, Any] = [False] * len(__A ) _lowerCamelCase : Optional[Any] = [-1] * len(__A ) def dfs(_lowerCamelCase , _lowerCamelCase ): _lowerCamelCase : Dict = True _lowerCamelCase : str = c for u in graph[v]: if not visited[u]: dfs(__A , 1 - c ) for i in range(len(__A ) ): if not visited[i]: dfs(__A , 0 ) for i in range(len(__A ) ): for j in graph[i]: if color[i] == color[j]: return False return True # Adjacency list of graph _lowerCAmelCase : Tuple = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []} print(check_bipartite_dfs(graph))
357
"""simple docstring""" # Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version _lowerCAmelCase : List[Any] = get_logger(__name__) class A_ : lowerCAmelCase__ = 'dummy_data' lowerCAmelCase__ = 'datasets' lowerCAmelCase__ = False def __init__( self: List[str] ,__lowerCAmelCase: str ,__lowerCAmelCase: str ,__lowerCAmelCase: Union[Version, str] ,__lowerCAmelCase: Optional[str] = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Optional[List[Callable]] = None ,): '''simple docstring''' _lowerCamelCase : str = 0 _lowerCamelCase : List[str] = dataset_name _lowerCamelCase : Optional[int] = cache_dir _lowerCamelCase : Optional[int] = use_local_dummy_data _lowerCamelCase : int = config # download_callbacks take a single url as input _lowerCamelCase : List[Callable] = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root _lowerCamelCase : int = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general _lowerCamelCase : Tuple = str(__lowerCAmelCase ) # to be downloaded _lowerCamelCase : Optional[Any] = None _lowerCamelCase : Dict = None @property def _lowercase ( self: str ): '''simple docstring''' if self._dummy_file is None: _lowerCamelCase : List[str] = self.download_dummy_data() return self._dummy_file @property def _lowercase ( self: str ): '''simple docstring''' if self.config is not None: # structure is dummy / config_name / version_name return os.path.join("dummy" ,self.config.name ,self.version_name ) # structure is dummy / version_name return os.path.join("dummy" ,self.version_name ) @property def _lowercase ( self: Optional[Any] ): '''simple docstring''' return os.path.join(self.dummy_data_folder ,"dummy_data.zip" ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase : Dict = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) _lowerCamelCase : Optional[int] = cached_path( __lowerCAmelCase ,cache_dir=self.cache_dir ,extract_compressed_file=__lowerCAmelCase ,force_extract=__lowerCAmelCase ) return os.path.join(__lowerCAmelCase ,self.dummy_file_name ) @property def _lowercase ( self: Tuple ): '''simple docstring''' return os.path.join(self.datasets_scripts_dir ,self.dataset_name ,self.dummy_zip_file ) @property def _lowercase ( self: List[str] ): '''simple docstring''' if self._bucket_url is None: _lowerCamelCase : List[str] = hf_github_url(self.dataset_name ,self.dummy_zip_file.replace(os.sep ,"/" ) ) return self._bucket_url @property def _lowercase ( self: Union[str, Any] ): '''simple docstring''' if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep ,"/" ).split("/" )[:-1] ) def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: str ,*__lowerCAmelCase: List[Any] ): '''simple docstring''' if self.load_existing_dummy_data: # dummy data is downloaded and tested _lowerCamelCase : Tuple = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned _lowerCamelCase : Optional[Any] = self.dummy_file_name # special case when data_url is a dict if isinstance(__lowerCAmelCase ,__lowerCAmelCase ): return self.create_dummy_data_dict(__lowerCAmelCase ,__lowerCAmelCase ) elif isinstance(__lowerCAmelCase ,(list, tuple) ): return self.create_dummy_data_list(__lowerCAmelCase ,__lowerCAmelCase ) else: return self.create_dummy_data_single(__lowerCAmelCase ,__lowerCAmelCase ) def _lowercase ( self: str ,__lowerCAmelCase: Optional[int] ,*__lowerCAmelCase: Optional[int] ): '''simple docstring''' return self.download_and_extract(__lowerCAmelCase ) def _lowercase ( self: List[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: int ): '''simple docstring''' return self.download_and_extract(__lowerCAmelCase ) def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Optional[int] ,*__lowerCAmelCase: List[str] ,**__lowerCAmelCase: Optional[int] ): '''simple docstring''' return path def _lowercase ( self: Optional[int] ): '''simple docstring''' return {} def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: str ): '''simple docstring''' _lowerCamelCase : str = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(__lowerCAmelCase ,__lowerCAmelCase ): for single_url in single_urls: download_callback(__lowerCAmelCase ) else: _lowerCamelCase : Union[str, Any] = single_urls download_callback(__lowerCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(__lowerCAmelCase ,__lowerCAmelCase ): _lowerCamelCase : Dict = [os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(Path(__lowerCAmelCase ).name ) ) for x in single_urls] else: _lowerCamelCase : Union[str, Any] = single_urls _lowerCamelCase : List[str] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(Path(__lowerCAmelCase ).name ) ) _lowerCamelCase : List[Any] = value # make sure that values are unique if all(isinstance(__lowerCAmelCase ,__lowerCAmelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique _lowerCamelCase : List[Any] = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def _lowercase ( self: int ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Tuple ): '''simple docstring''' _lowerCamelCase : Dict = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one _lowerCamelCase : List[str] = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" ,__lowerCAmelCase ) ) for url in data_url ) _lowerCamelCase : Optional[Any] = all( url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): _lowerCamelCase : Tuple = [data_url[0]] * len(__lowerCAmelCase ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(__lowerCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the 
url has arguments, we need to encode them with urllib.parse.quote_plus _lowerCamelCase : List[Any] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(single_url.split("/" )[-1] ) ) dummy_data_list.append(__lowerCAmelCase ) return dummy_data_list def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: List[Any] ): '''simple docstring''' for download_callback in self.download_callbacks: download_callback(__lowerCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus _lowerCamelCase : Optional[int] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(data_url.split("/" )[-1] ) ) if os.path.exists(__lowerCAmelCase ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def _lowercase ( self: Optional[Any] ): '''simple docstring''' pass def _lowercase ( self: Optional[int] ): '''simple docstring''' pass def _lowercase ( self: List[Any] ,__lowerCAmelCase: Optional[int] ): '''simple docstring''' def _iter_archive_members(__lowerCAmelCase: Any ): # this preserves the order of the members inside the ZIP archive _lowerCamelCase : Tuple = Path(self.dummy_file ).parent _lowerCamelCase : str = path.relative_to(__lowerCAmelCase ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: _lowerCamelCase : Optional[int] = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = Path(__lowerCAmelCase ) _lowerCamelCase : int = _iter_archive_members(__lowerCAmelCase ) if self.use_local_dummy_data else path.rglob("*" ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith((".", "__") ): yield file_path.relative_to(__lowerCAmelCase ).as_posix(), file_path.open("rb" ) def _lowercase ( self: str ,__lowerCAmelCase: Optional[int] ): '''simple docstring''' if not isinstance(__lowerCAmelCase ,__lowerCAmelCase ): _lowerCamelCase : List[Any] = [paths] for path in paths: if os.path.isfile(__lowerCAmelCase ): if os.path.basename(__lowerCAmelCase ).startswith((".", "__") ): return yield path else: for dirpath, dirnames, filenames in os.walk(__lowerCAmelCase ): if os.path.basename(__lowerCAmelCase ).startswith((".", "__") ): continue dirnames.sort() for filename in sorted(__lowerCAmelCase ): if filename.startswith((".", "__") ): continue yield os.path.join(__lowerCAmelCase ,__lowerCAmelCase )
340
0
def get_set_bits_count(number: int) -> int:
    """Count set bits with Brian Kernighan's trick: n &= n - 1 clears the lowest set bit."""
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
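# Usage sketch for the popcount helper above:
print(get_set_bits_count(0))   # 0
print(get_set_bits_count(25))  # 3, since bin(25) == "0b11001"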
7
import unittest from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available from transformers.pipelines import pipeline from transformers.pipelines.document_question_answering import apply_tesseract from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_detectrona, require_pytesseract, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image from transformers.image_utils import load_image else: class __a : @staticmethod def SCREAMING_SNAKE_CASE__ ( *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[str]: '''simple docstring''' pass def snake_case_ ( snake_case ) -> Optional[Any]: return None # This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace, # so we can expect it to be available. __lowerCAmelCase = ( '''https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png''' ) @is_pipeline_test @require_torch @require_vision class __a ( unittest.TestCase ): __lowercase : Dict = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING @require_pytesseract @require_vision def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int: '''simple docstring''' lowercase__: Optional[Any] = pipeline( 'document-question-answering' , model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ ) lowercase__: int = INVOICE_URL lowercase__: Dict = list(zip(*apply_tesseract(load_image(lowerCAmelCase__ ) , lowerCAmelCase__ , '' ) ) ) lowercase__: str = 'What is the placebo?' lowercase__: Any = [ { 'image': load_image(lowerCAmelCase__ ), 'question': question, }, { 'image': image, 'question': question, }, { 'image': image, 'question': question, 'word_boxes': word_boxes, }, ] return dqa_pipeline, examples def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> int: '''simple docstring''' lowercase__: str = dqa_pipeline(lowerCAmelCase__ , top_k=2 ) self.assertEqual( lowerCAmelCase__ , [ [ {'score': ANY(lowerCAmelCase__ ), 'answer': ANY(lowerCAmelCase__ ), 'start': ANY(lowerCAmelCase__ ), 'end': ANY(lowerCAmelCase__ )}, {'score': ANY(lowerCAmelCase__ ), 'answer': ANY(lowerCAmelCase__ ), 'start': ANY(lowerCAmelCase__ ), 'end': ANY(lowerCAmelCase__ )}, ] ] * 3 , ) @require_torch @require_detectrona @require_pytesseract def SCREAMING_SNAKE_CASE__ ( self ) -> Dict: '''simple docstring''' lowercase__: Union[str, Any] = pipeline('document-question-answering' , model='hf-internal-testing/tiny-random-layoutlmv2' ) lowercase__: Optional[Any] = INVOICE_URL lowercase__: int = 'How many cats are there?' lowercase__: List[str] = [ {'score': 0.0_0_0_1, 'answer': 'oy 2312/2019', 'start': 38, 'end': 39}, {'score': 0.0_0_0_1, 'answer': 'oy 2312/2019 DUE', 'start': 38, 'end': 40}, ] lowercase__: Dict = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 ) self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4 ) , lowerCAmelCase__ ) lowercase__: Tuple = dqa_pipeline({'image': image, 'question': question} , top_k=2 ) self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4 ) , lowerCAmelCase__ ) # This image does not detect ANY text in it, meaning layoutlmv2 should fail. 
# Empty answer probably lowercase__: str = './tests/fixtures/tests_samples/COCO/000000039769.png' lowercase__: Tuple = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 ) self.assertEqual(lowerCAmelCase__ , [] ) # We can optionnally pass directly the words and bounding boxes lowercase__: int = './tests/fixtures/tests_samples/COCO/000000039769.png' lowercase__: List[Any] = [] lowercase__: Optional[int] = [] lowercase__: Any = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , words=lowerCAmelCase__ , boxes=lowerCAmelCase__ , top_k=2 ) self.assertEqual(lowerCAmelCase__ , [] ) @slow @require_torch @require_detectrona @require_pytesseract def SCREAMING_SNAKE_CASE__ ( self ) -> Any: '''simple docstring''' lowercase__: List[str] = pipeline( 'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , ) lowercase__: int = INVOICE_URL lowercase__: str = 'What is the invoice number?' lowercase__: Union[str, Any] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 ) self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=4 ) , [ {'score': 0.9_9_4_4, 'answer': 'us-001', 'start': 16, 'end': 16}, {'score': 0.0_0_0_9, 'answer': 'us-001', 'start': 16, 'end': 16}, ] , ) lowercase__: Any = dqa_pipeline({'image': image, 'question': question} , top_k=2 ) self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=4 ) , [ {'score': 0.9_9_4_4, 'answer': 'us-001', 'start': 16, 'end': 16}, {'score': 0.0_0_0_9, 'answer': 'us-001', 'start': 16, 'end': 16}, ] , ) lowercase__: Optional[int] = dqa_pipeline( [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 ) self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=4 ) , [ [ {'score': 0.9_9_4_4, 'answer': 'us-001', 'start': 16, 'end': 16}, {'score': 0.0_0_0_9, 'answer': 'us-001', 'start': 16, 'end': 16}, ], ] * 2 , ) @slow @require_torch @require_detectrona @require_pytesseract def SCREAMING_SNAKE_CASE__ ( self ) -> str: '''simple docstring''' lowercase__: Any = pipeline( 'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , max_seq_len=50 , ) lowercase__: Optional[int] = INVOICE_URL lowercase__: Union[str, Any] = 'What is the invoice number?' 
lowercase__: Optional[Any] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 ) self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=4 ) , [ {'score': 0.9_9_7_4, 'answer': '1110212019', 'start': 23, 'end': 23}, {'score': 0.9_9_4_8, 'answer': 'us-001', 'start': 16, 'end': 16}, ] , ) lowercase__: Tuple = dqa_pipeline({'image': image, 'question': question} , top_k=2 ) self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=4 ) , [ {'score': 0.9_9_7_4, 'answer': '1110212019', 'start': 23, 'end': 23}, {'score': 0.9_9_4_8, 'answer': 'us-001', 'start': 16, 'end': 16}, ] , ) lowercase__: Dict = dqa_pipeline( [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 ) self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=4 ) , [ [ {'score': 0.9_9_7_4, 'answer': '1110212019', 'start': 23, 'end': 23}, {'score': 0.9_9_4_8, 'answer': 'us-001', 'start': 16, 'end': 16}, ] ] * 2 , ) @slow @require_torch @require_pytesseract @require_vision def SCREAMING_SNAKE_CASE__ ( self ) -> str: '''simple docstring''' lowercase__: Optional[Any] = AutoTokenizer.from_pretrained( 'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=lowerCAmelCase__ ) lowercase__: Optional[Any] = pipeline( 'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=lowerCAmelCase__ , revision='3dc6de3' , ) lowercase__: List[str] = INVOICE_URL lowercase__: Union[str, Any] = 'What is the invoice number?' lowercase__: Dict = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 ) self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=4 ) , [ {'score': 0.4_2_5_1, 'answer': 'us-001', 'start': 16, 'end': 16}, {'score': 0.0_8_1_9, 'answer': '1110212019', 'start': 23, 'end': 23}, ] , ) lowercase__: List[str] = dqa_pipeline({'image': image, 'question': question} , top_k=2 ) self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=4 ) , [ {'score': 0.4_2_5_1, 'answer': 'us-001', 'start': 16, 'end': 16}, {'score': 0.0_8_1_9, 'answer': '1110212019', 'start': 23, 'end': 23}, ] , ) lowercase__: int = dqa_pipeline( [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 ) self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=4 ) , [ [ {'score': 0.4_2_5_1, 'answer': 'us-001', 'start': 16, 'end': 16}, {'score': 0.0_8_1_9, 'answer': '1110212019', 'start': 23, 'end': 23}, ] ] * 2 , ) lowercase__: Any = list(zip(*apply_tesseract(load_image(lowerCAmelCase__ ) , lowerCAmelCase__ , '' ) ) ) # This model should also work if `image` is set to None lowercase__: List[Any] = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 ) self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=4 ) , [ {'score': 0.4_2_5_1, 'answer': 'us-001', 'start': 16, 'end': 16}, {'score': 0.0_8_1_9, 'answer': '1110212019', 'start': 23, 'end': 23}, ] , ) @slow @require_torch @require_pytesseract @require_vision def SCREAMING_SNAKE_CASE__ ( self ) -> Dict: '''simple docstring''' lowercase__: Any = AutoTokenizer.from_pretrained( 'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=lowerCAmelCase__ ) lowercase__: str = pipeline( 'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=lowerCAmelCase__ , revision='3dc6de3' , max_seq_len=50 , ) lowercase__: Optional[Any] = INVOICE_URL lowercase__: Optional[Any] = 'What is the invoice number?' 
lowercase__: Optional[int] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 ) self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=4 ) , [ {'score': 0.9_9_9_9, 'answer': 'us-001', 'start': 16, 'end': 16}, {'score': 0.9_9_9_8, 'answer': 'us-001', 'start': 16, 'end': 16}, ] , ) lowercase__: Any = dqa_pipeline( [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 ) self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=4 ) , [ [ {'score': 0.9_9_9_9, 'answer': 'us-001', 'start': 16, 'end': 16}, {'score': 0.9_9_9_8, 'answer': 'us-001', 'start': 16, 'end': 16}, ] ] * 2 , ) lowercase__: Optional[int] = list(zip(*apply_tesseract(load_image(lowerCAmelCase__ ) , lowerCAmelCase__ , '' ) ) ) # This model should also work if `image` is set to None lowercase__: Tuple = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 ) self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=4 ) , [ {'score': 0.9_9_9_9, 'answer': 'us-001', 'start': 16, 'end': 16}, {'score': 0.9_9_9_8, 'answer': 'us-001', 'start': 16, 'end': 16}, ] , ) @slow @require_torch def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]: '''simple docstring''' lowercase__: List[Any] = pipeline( 'document-question-answering' , model='naver-clova-ix/donut-base-finetuned-docvqa' , tokenizer=AutoTokenizer.from_pretrained('naver-clova-ix/donut-base-finetuned-docvqa' ) , feature_extractor='naver-clova-ix/donut-base-finetuned-docvqa' , ) lowercase__: int = INVOICE_URL lowercase__: int = 'What is the invoice number?' lowercase__: Union[str, Any] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 ) self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4 ) , [{'answer': 'us-001'}] ) @require_tf @unittest.skip('Document question answering not implemented in TF' ) def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]: '''simple docstring''' pass
196
0
from __future__ import annotations

from collections.abc import Generator


def sieve() -> Generator[int, None, None]:
    """Incremental (unbounded) sieve of Eratosthenes, yielding primes one at a time."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # `prime` is composite: push its smallest factor to the next free multiple.
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the reminder will be 2.
        next(primes)
        n += 2


if __name__ == "__main__":
    print(solution())
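# Sanity-check sketch for the incremental sieve above:
from itertools import islice

print(list(islice(sieve(), 10)))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]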
223
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ImageTextProcessor(ProcessorMixin):
    # The original class name was obfuscated in the source; a neutral name is used here.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
223
1
"""simple docstring""" import sys from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers __A = "python tqdm regex requests packaging filelock numpy tokenizers".split() if sys.version_info < (3, 7): pkgs_to_check_at_runtime.append("dataclasses") if sys.version_info < (3, 8): pkgs_to_check_at_runtime.append("importlib_metadata") for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''') def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase=None ) -> Dict: require_version(deps[pkg] , __UpperCAmelCase )
177
"""simple docstring""" import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class UpperCAmelCase (unittest.TestCase ): """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=4 , ): lowercase__: Dict = parent lowercase__: List[str] = batch_size lowercase__: Optional[Any] = seq_length lowercase__: List[Any] = is_training lowercase__: int = use_attention_mask lowercase__: Tuple = use_token_type_ids lowercase__: Union[str, Any] = use_labels lowercase__: str = vocab_size lowercase__: str = hidden_size lowercase__: str = num_hidden_layers lowercase__: Optional[int] = num_attention_heads lowercase__: List[str] = intermediate_size lowercase__: List[str] = hidden_act lowercase__: Tuple = hidden_dropout_prob lowercase__: int = attention_probs_dropout_prob lowercase__: int = max_position_embeddings lowercase__: Union[str, Any] = type_vocab_size lowercase__: List[Any] = type_sequence_label_size lowercase__: Any = initializer_range lowercase__: str = num_choices def _snake_case ( self ): lowercase__: int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__: List[Any] = None if self.use_attention_mask: lowercase__: Tuple = random_attention_mask([self.batch_size, self.seq_length] ) lowercase__: List[Any] = None if self.use_token_type_ids: lowercase__: str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase__: Optional[int] = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _snake_case ( self ): lowercase__: str = self.prepare_config_and_inputs() lowercase__, lowercase__, lowercase__, lowercase__: Optional[Any] = config_and_inputs lowercase__: Dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask} return config, inputs_dict @require_flax class UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ): """simple docstring""" _UpperCAmelCase :List[str] = True _UpperCAmelCase :Union[str, Any] = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if 
is_flax_available() else () ) def _snake_case ( self ): lowercase__: str = FlaxRoFormerModelTester(self ) @slow def _snake_case ( self ): for model_class_name in self.all_model_classes: lowercase__: Dict = model_class_name.from_pretrained('''junnyu/roformer_chinese_small''' , from_pt=_UpperCAmelCase ) lowercase__: int = model(np.ones((1, 1) ) ) self.assertIsNotNone(_UpperCAmelCase ) @require_flax class UpperCAmelCase (unittest.TestCase ): """simple docstring""" @slow def _snake_case ( self ): lowercase__: Any = FlaxRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' ) lowercase__: Optional[int] = jnp.array([[0, 1, 2, 3, 4, 5]] ) lowercase__: List[Any] = model(_UpperCAmelCase )[0] lowercase__: str = 50000 lowercase__: Tuple = (1, 6, vocab_size) self.assertEqual(output.shape , _UpperCAmelCase ) lowercase__: List[Any] = jnp.array( [[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4 ) )
177
1
import argparse
import logging
import os
from datetime import datetime

import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm

from transformers import GPTaLMHeadModel


A_: int = logging.getLogger(__name__)


def A(a_, a_) -> Tuple:
    # save results
    if os.path.exists(a_):
        if os.path.exists(os.path.join(a_, 'config.json')) and os.path.isfile(os.path.join(a_, 'config.json')):
            os.remove(os.path.join(a_, 'config.json'))
        if os.path.exists(os.path.join(a_, 'pytorch_model.bin')) and os.path.isfile(os.path.join(a_, 'pytorch_model.bin')):
            os.remove(os.path.join(a_, 'pytorch_model.bin'))
    else:
        os.makedirs(a_)
    model.save_pretrained(a_)


def A(a_, a_=False) -> Optional[Any]:
    __UpperCamelCase: Optional[Any] = 2
    if unlogit:
        __UpperCamelCase: List[str] = torch.pow(a_, a_)
    __UpperCamelCase: str = p * torch.log(a_)
    __UpperCamelCase: str = 0
    return -plogp.sum(dim=-1)


def A(a_) -> str:
    logger.info('lv, h >\t' + '\t'.join(F'{x + 1}' for x in range(len(a_))))
    for row in range(len(a_)):
        if tensor.dtype != torch.long:
            logger.info(F'layer {row + 1}:\t' + '\t'.join(F'{x:.5f}' for x in tensor[row].cpu().data))
        else:
            logger.info(F'layer {row + 1}:\t' + '\t'.join(F'{x:d}' for x in tensor[row].cpu().data))


def A(a_, a_, a_, a_=True, a_=True, a_=None, a_=False) -> List[str]:
    __UpperCamelCase: Any = model.config.num_hidden_layers, model.config.num_attention_heads
    __UpperCamelCase: Union[str, Any] = torch.zeros(a_, a_).to(args.device)
    __UpperCamelCase: List[Any] = torch.zeros(a_, a_).to(args.device)
    if head_mask is None:
        __UpperCamelCase: str = torch.ones(a_, a_).to(args.device)
    head_mask.requires_grad_(requires_grad=a_)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        __UpperCamelCase: List[str] = None
    __UpperCamelCase: str = 0.0
    __UpperCamelCase: str = 0.0
    for step, inputs in enumerate(tqdm(a_, desc='Iteration', disable=args.local_rank not in [-1, 0])):
        __UpperCamelCase: List[str] = tuple(t.to(args.device) for t in inputs)
        (__UpperCamelCase,) = inputs
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        __UpperCamelCase: List[str] = model(a_, labels=a_, head_mask=a_)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        __UpperCamelCase: Dict = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(a_):
                __UpperCamelCase: Any = entropy(attn.detach(), a_)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(a_).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        __UpperCamelCase: List[Any] = 2
        __UpperCamelCase: Any = torch.pow(torch.pow(a_, a_).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20
    if not args.dont_normalize_global_importance:
        __UpperCamelCase: Tuple = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info('Attention entropies')
        print_ad_tensor(a_)
    if compute_importance:
        logger.info('Head importance scores')
        print_ad_tensor(a_)
    logger.info('Head ranked by importance scores')
    __UpperCamelCase: List[Any] = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    __UpperCamelCase: Union[str, Any] = torch.arange(head_importance.numel(), device=args.device)
    __UpperCamelCase: List[str] = head_ranks.view_as(a_)
    print_ad_tensor(a_)
    return attn_entropy, head_importance, total_loss


def A(a_, a_, a_) -> str:
    __UpperCamelCase: str = compute_heads_importance(a_, a_, a_, compute_entropy=a_)
    __UpperCamelCase: Any = 1 / loss  # instead of downsteam score use the LM loss
    logger.info('Pruning: original score: %f, threshold: %f', a_, original_score * args.masking_threshold)
    __UpperCamelCase: str = torch.ones_like(a_)
    __UpperCamelCase: Tuple = max(1, int(new_head_mask.numel() * args.masking_amount))
    __UpperCamelCase: List[Any] = original_score
    while current_score >= original_score * args.masking_threshold:
        __UpperCamelCase: int = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        __UpperCamelCase: Optional[Any] = float('Inf')
        __UpperCamelCase: List[Any] = head_importance.view(-1).sort()[1]
        if len(a_) <= num_to_mask:
            print('BREAK BY num_to_mask')
            break
        # mask heads
        __UpperCamelCase: Tuple = current_heads_to_mask[:num_to_mask]
        logger.info('Heads to mask: %s', str(current_heads_to_mask.tolist()))
        __UpperCamelCase: Tuple = new_head_mask.view(-1)
        __UpperCamelCase: List[str] = 0.0
        __UpperCamelCase: Optional[Any] = new_head_mask.view_as(a_)
        __UpperCamelCase: Any = new_head_mask.clone().detach()
        print_ad_tensor(a_)
        # Compute metric and head importance again
        __UpperCamelCase: Any = compute_heads_importance(a_, a_, a_, compute_entropy=a_, head_mask=a_)
        __UpperCamelCase: Optional[Any] = 1 / loss
        logger.info('Masking: current score: %f, remaining heads %d (%.1f percents)', a_, new_head_mask.sum(), new_head_mask.sum() / new_head_mask.numel() * 100, )

    logger.info('Final head mask')
    print_ad_tensor(a_)
    np.save(os.path.join(args.output_dir, 'head_mask.npy'), head_mask.detach().cpu().numpy())
    return head_mask


def A(a_, a_, a_, a_) -> List[str]:
    __UpperCamelCase: int = datetime.now()
    __UpperCamelCase: List[str] = compute_heads_importance(a_, a_, a_, compute_entropy=a_, compute_importance=a_, head_mask=a_)
    __UpperCamelCase: Optional[Any] = 1 / loss
    __UpperCamelCase: Dict = datetime.now() - before_time
    __UpperCamelCase: Union[str, Any] = sum(p.numel() for p in model.parameters())
    __UpperCamelCase: int = {layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(a_))}
    for k, v in heads_to_prune.items():
        if isinstance(a_, a_):
            __UpperCamelCase: Dict = [
                v,
            ]
    assert sum(len(a_) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(a_)
    __UpperCamelCase: List[Any] = sum(p.numel() for p in model.parameters())
    __UpperCamelCase: str = datetime.now()
    __UpperCamelCase: Tuple = compute_heads_importance(a_, a_, a_, compute_entropy=a_, compute_importance=a_, head_mask=a_, actually_pruned=a_, )
    __UpperCamelCase: Union[str, Any] = 1 / loss
    __UpperCamelCase: Optional[Any] = datetime.now() - before_time
    logger.info('Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)', a_, a_, pruned_num_params / original_num_params * 100, )
    logger.info('Pruning: score with masking: %f score with pruning: %f', a_, a_)
    logger.info('Pruning: speed ratio (original timing / new timing): %f percents', original_time / new_time * 100)
    save_model(a_, args.output_dir)


def A() -> Union[str, Any]:
    __UpperCamelCase: Optional[Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--data_dir', default=a_, type=a_, required=a_, help='The input data dir. Should contain the .tsv files (or other data files) for the task.', )
    parser.add_argument('--model_name_or_path', default=a_, type=a_, required=a_, help='Path to pretrained model or model identifier from huggingface.co/models', )
    parser.add_argument('--output_dir', default=a_, type=a_, required=a_, help='The output directory where the model predictions and checkpoints will be written.', )
    # Other parameters
    parser.add_argument('--config_name', default='', type=a_, help='Pretrained config name or path if not the same as model_name_or_path', )
    parser.add_argument('--tokenizer_name', default='', type=a_, help='Pretrained tokenizer name or path if not the same as model_name_or_path', )
    parser.add_argument('--cache_dir', default=a_, type=a_, help='Where do you want to store the pre-trained models downloaded from s3', )
    parser.add_argument('--data_subset', type=a_, default=-1, help='If > 0: limit the data to a subset of data_subset instances.')
    parser.add_argument('--overwrite_output_dir', action='store_true', help='Whether to overwrite data in output directory')
    parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
    parser.add_argument('--dont_normalize_importance_by_layer', action='store_true', help='Don\'t normalize importance score by layers')
    parser.add_argument('--dont_normalize_global_importance', action='store_true', help='Don\'t normalize all importance scores between 0 and 1', )
    parser.add_argument('--try_masking', action='store_true', help='Whether to try to mask head until a threshold of accuracy.')
    parser.add_argument('--masking_threshold', default=0.9, type=a_, help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).', )
    parser.add_argument('--masking_amount', default=0.1, type=a_, help='Amount to heads to masking at each masking step.')
    parser.add_argument('--metric_name', default='acc', type=a_, help='Metric to use for head masking.')
    parser.add_argument('--max_seq_length', default=128, type=a_, help=('The maximum total input sequence length after WordPiece tokenization. \n' 'Sequences longer than this will be truncated, sequences shorter padded.'), )
    parser.add_argument('--batch_size', default=1, type=a_, help='Batch size.')
    parser.add_argument('--seed', type=a_, default=42)
    parser.add_argument('--local_rank', type=a_, default=-1, help='local_rank for distributed training on gpus')
    parser.add_argument('--no_cuda', action='store_true', help='Whether not to use CUDA when available')
    parser.add_argument('--server_ip', type=a_, default='', help='Can be used for distant debugging.')
    parser.add_argument('--server_port', type=a_, default='', help='Can be used for distant debugging.')
    __UpperCamelCase: int = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print('Waiting for debugger attach')
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=a_)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        __UpperCamelCase: Optional[int] = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu')
        __UpperCamelCase: Optional[Any] = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        __UpperCamelCase: Union[str, Any] = torch.device('cuda', args.local_rank)
        __UpperCamelCase: List[str] = 1
        torch.distributed.init_process_group(backend='nccl')  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device, args.n_gpu, bool(args.local_rank != -1)))
    __UpperCamelCase: Any = GPTaLMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        __UpperCamelCase: str = nn.parallel.DistributedDataParallel(a_, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=a_)
    elif args.n_gpu > 1:
        __UpperCamelCase: Union[str, Any] = nn.DataParallel(a_)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=a_)
    torch.save(a_, os.path.join(args.output_dir, 'run_args.bin'))
    logger.info('Training/evaluation parameters %s', a_)

    # Prepare dataset
    __UpperCamelCase: Optional[Any] = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.intaa),
        ]
    )
    __UpperCamelCase: Optional[int] = (torch.from_numpy(a_),)
    __UpperCamelCase: str = TensorDataset(*a_)
    __UpperCamelCase: Optional[int] = RandomSampler(a_)
    __UpperCamelCase: List[str] = DataLoader(a_, sampler=a_, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(a_, a_, a_)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        __UpperCamelCase: int = mask_heads(a_, a_, a_)
        prune_heads(a_, a_, a_, a_)


if __name__ == "__main__":
    main()
366
import unittest

from diffusers.pipelines.pipeline_utils import is_safetensors_compatible


class __A(unittest.TestCase):
    """simple docstring"""

    def __lowercase(self):
        """simple docstring"""
        __UpperCamelCase: Optional[Any] = ['safety_checker/pytorch_model.bin', 'safety_checker/model.safetensors', 'vae/diffusion_pytorch_model.bin', 'vae/diffusion_pytorch_model.safetensors', 'text_encoder/pytorch_model.bin', 'text_encoder/model.safetensors', 'unet/diffusion_pytorch_model.bin', 'unet/diffusion_pytorch_model.safetensors']
        self.assertTrue(is_safetensors_compatible(lowerCamelCase__))

    def __lowercase(self):
        """simple docstring"""
        __UpperCamelCase: Any = ['unet/diffusion_pytorch_model.bin', 'unet/diffusion_pytorch_model.safetensors']
        self.assertTrue(is_safetensors_compatible(lowerCamelCase__))

    def __lowercase(self):
        """simple docstring"""
        __UpperCamelCase: List[str] = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(lowerCamelCase__))

    def __lowercase(self):
        """simple docstring"""
        __UpperCamelCase: List[str] = ['text_encoder/pytorch_model.bin', 'text_encoder/model.safetensors']
        self.assertTrue(is_safetensors_compatible(lowerCamelCase__))

    def __lowercase(self):
        """simple docstring"""
        __UpperCamelCase: Any = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            # Removed: 'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(lowerCamelCase__))

    def __lowercase(self):
        """simple docstring"""
        __UpperCamelCase: Tuple = ['safety_checker/pytorch_model.fp16.bin', 'safety_checker/model.fp16.safetensors', 'vae/diffusion_pytorch_model.fp16.bin', 'vae/diffusion_pytorch_model.fp16.safetensors', 'text_encoder/pytorch_model.fp16.bin', 'text_encoder/model.fp16.safetensors', 'unet/diffusion_pytorch_model.fp16.bin', 'unet/diffusion_pytorch_model.fp16.safetensors']
        __UpperCamelCase: List[Any] = 'fp16'
        self.assertTrue(is_safetensors_compatible(lowerCamelCase__, variant=lowerCamelCase__))

    def __lowercase(self):
        """simple docstring"""
        __UpperCamelCase: str = ['unet/diffusion_pytorch_model.fp16.bin', 'unet/diffusion_pytorch_model.fp16.safetensors']
        __UpperCamelCase: str = 'fp16'
        self.assertTrue(is_safetensors_compatible(lowerCamelCase__, variant=lowerCamelCase__))

    def __lowercase(self):
        """simple docstring"""
        __UpperCamelCase: Optional[int] = ['unet/diffusion_pytorch_model.bin', 'unet/diffusion_pytorch_model.safetensors']
        __UpperCamelCase: int = 'fp16'
        self.assertTrue(is_safetensors_compatible(lowerCamelCase__, variant=lowerCamelCase__))

    def __lowercase(self):
        """simple docstring"""
        __UpperCamelCase: Any = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        __UpperCamelCase: str = 'fp16'
        self.assertFalse(is_safetensors_compatible(lowerCamelCase__, variant=lowerCamelCase__))

    def __lowercase(self):
        """simple docstring"""
        __UpperCamelCase: List[str] = ['text_encoder/pytorch_model.fp16.bin', 'text_encoder/model.fp16.safetensors']
        __UpperCamelCase: Any = 'fp16'
        self.assertTrue(is_safetensors_compatible(lowerCamelCase__, variant=lowerCamelCase__))

    def __lowercase(self):
        """simple docstring"""
        __UpperCamelCase: List[Any] = ['text_encoder/pytorch_model.bin', 'text_encoder/model.safetensors']
        __UpperCamelCase: Tuple = 'fp16'
        self.assertTrue(is_safetensors_compatible(lowerCamelCase__, variant=lowerCamelCase__))

    def __lowercase(self):
        """simple docstring"""
        __UpperCamelCase: Any = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            # 'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        __UpperCamelCase: Any = 'fp16'
        self.assertFalse(is_safetensors_compatible(lowerCamelCase__, variant=lowerCamelCase__))
245
0
'''simple docstring'''
import argparse

import requests
import torch
from PIL import Image

from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor


def UpperCAmelCase_(__lowerCamelCase: Optional[Any]):
    lowercase_: Dict = SwinConfig(image_size=1_92)
    if "base" in model_name:
        lowercase_: List[str] = 6
        lowercase_: List[str] = 1_28
        lowercase_: Optional[int] = (2, 2, 18, 2)
        lowercase_: Union[str, Any] = (4, 8, 16, 32)
    elif "large" in model_name:
        lowercase_: Union[str, Any] = 12
        lowercase_: int = 1_92
        lowercase_: Any = (2, 2, 18, 2)
        lowercase_: Optional[int] = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")
    lowercase_: Optional[Any] = window_size
    lowercase_: Any = embed_dim
    lowercase_: List[str] = depths
    lowercase_: Optional[Any] = num_heads
    return config


def UpperCAmelCase_(__lowerCamelCase: int):
    if "encoder.mask_token" in name:
        lowercase_: Tuple = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        lowercase_: List[str] = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        lowercase_: int = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        lowercase_: Tuple = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        lowercase_: Tuple = name.replace("attn", "attention.self")
    if "norm1" in name:
        lowercase_: List[str] = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        lowercase_: str = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        lowercase_: Optional[Any] = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        lowercase_: List[str] = name.replace("mlp.fc2", "output.dense")
    if name == "encoder.norm.weight":
        lowercase_: List[Any] = "layernorm.weight"
    if name == "encoder.norm.bias":
        lowercase_: Dict = "layernorm.bias"
    if "decoder" in name:
        pass
    else:
        lowercase_: Union[str, Any] = "swin." + name
    return name


def UpperCAmelCase_(__lowerCamelCase: int, __lowerCamelCase: Tuple):
    for key in orig_state_dict.copy().keys():
        lowercase_: Union[str, Any] = orig_state_dict.pop(__lowerCAmelCase)
        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            lowercase_: Dict = key.split(".")
            lowercase_: List[str] = int(key_split[2])
            lowercase_: Dict = int(key_split[4])
            lowercase_: Tuple = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                lowercase_: Dict = val[:dim, :]
                lowercase_: str = val[dim : dim * 2, :]
                lowercase_: Any = val[-dim:, :]
            else:
                lowercase_: Tuple = val[:dim]
                lowercase_: Optional[Any] = val[dim : dim * 2]
                lowercase_: List[str] = val[-dim:]
        else:
            lowercase_: Optional[int] = val
    return orig_state_dict


def UpperCAmelCase_(__lowerCamelCase: Optional[int], __lowerCamelCase: Optional[Any], __lowerCamelCase: Optional[Any], __lowerCamelCase: Tuple):
    lowercase_: Dict = torch.load(__lowerCAmelCase, map_location="cpu")["model"]
    lowercase_: List[str] = get_swin_config(__lowerCAmelCase)
    lowercase_: int = SwinForMaskedImageModeling(__lowerCAmelCase)
    model.eval()
    lowercase_: List[Any] = convert_state_dict(__lowerCAmelCase, __lowerCAmelCase)
    model.load_state_dict(__lowerCAmelCase)
    lowercase_: Optional[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
    lowercase_: Union[str, Any] = ViTImageProcessor(size={"height": 1_92, "width": 1_92})
    lowercase_: Optional[int] = Image.open(requests.get(__lowerCAmelCase, stream=__lowerCAmelCase).raw)
    lowercase_: List[str] = image_processor(images=__lowerCAmelCase, return_tensors="pt")
    with torch.no_grad():
        lowercase_: Tuple = model(**__lowerCAmelCase).logits
    print(outputs.keys())
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(F'Saving model {model_name} to {pytorch_dump_folder_path}')
        model.save_pretrained(__lowerCAmelCase)
        print(F'Saving image processor to {pytorch_dump_folder_path}')
        image_processor.save_pretrained(__lowerCAmelCase)
    if push_to_hub:
        print(F'Pushing model and image processor for {model_name} to hub')
        model.push_to_hub(F'microsoft/{model_name}')
        image_processor.push_to_hub(F'microsoft/{model_name}')


if __name__ == "__main__":
    lowerCAmelCase: Tuple = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--model_name', default='swin-base-simmim-window6-192', type=str, choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'], help='Name of the Swin SimMIM model you\'d like to convert.', )
    parser.add_argument('--checkpoint_path', default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth', type=str, help='Path to the original PyTorch checkpoint (.pth file).', )
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.')
    parser.add_argument('--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.')
    lowerCAmelCase: Tuple = parser.parse_args()
    convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
223
import copy
from typing import Dict, List, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


SCREAMING_SNAKE_CASE__: Dict = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

SCREAMING_SNAKE_CASE__: int = logging.get_logger(__name__)


class lowerCAmelCase__(__lowercase):
    a__: Any = """mask2former"""
    a__: Dict = ["""swin"""]
    a__: Any = {"""hidden_size""": """hidden_dim"""}

    def __init__(self: Any, SCREAMING_SNAKE_CASE__: Optional[Dict] = None, SCREAMING_SNAKE_CASE__: int = 2_56, SCREAMING_SNAKE_CASE__: int = 2_56, SCREAMING_SNAKE_CASE__: int = 2_56, SCREAMING_SNAKE_CASE__: int = 10_24, SCREAMING_SNAKE_CASE__: str = "relu", SCREAMING_SNAKE_CASE__: int = 6, SCREAMING_SNAKE_CASE__: int = 10, SCREAMING_SNAKE_CASE__: int = 8, SCREAMING_SNAKE_CASE__: float = 0.0, SCREAMING_SNAKE_CASE__: int = 20_48, SCREAMING_SNAKE_CASE__: bool = False, SCREAMING_SNAKE_CASE__: bool = False, SCREAMING_SNAKE_CASE__: int = 4, SCREAMING_SNAKE_CASE__: int = 2_55, SCREAMING_SNAKE_CASE__: int = 1_00, SCREAMING_SNAKE_CASE__: float = 0.1, SCREAMING_SNAKE_CASE__: float = 2.0, SCREAMING_SNAKE_CASE__: float = 5.0, SCREAMING_SNAKE_CASE__: float = 5.0, SCREAMING_SNAKE_CASE__: int = 1_25_44, SCREAMING_SNAKE_CASE__: float = 3.0, SCREAMING_SNAKE_CASE__: float = 0.75, SCREAMING_SNAKE_CASE__: float = 0.02, SCREAMING_SNAKE_CASE__: float = 1.0, SCREAMING_SNAKE_CASE__: bool = True, SCREAMING_SNAKE_CASE__: List[int] = [4, 8, 16, 32], SCREAMING_SNAKE_CASE__: bool = None, **SCREAMING_SNAKE_CASE__: Tuple, ) -> str:
        if backbone_config is None:
            logger.info('''`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.''')
            __lowerCamelCase = CONFIG_MAPPING['''swin'''](image_size=2_24, in_channels=3, patch_size=4, embed_dim=96, depths=[2, 2, 18, 2], num_heads=[3, 6, 12, 24], window_size=7, drop_path_rate=0.3, use_absolute_embeddings=SCREAMING_SNAKE_CASE__, out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''], )
        if isinstance(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__):
            __lowerCamelCase = backbone_config.pop('''model_type''')
            __lowerCamelCase = CONFIG_MAPPING[backbone_model_type]
            __lowerCamelCase = config_class.from_dict(SCREAMING_SNAKE_CASE__)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. ''' f'''Supported model types: {','.join(self.backbones_supported)}''')
        __lowerCamelCase = backbone_config
        __lowerCamelCase = feature_size
        __lowerCamelCase = mask_feature_size
        __lowerCamelCase = hidden_dim
        __lowerCamelCase = encoder_feedforward_dim
        __lowerCamelCase = activation_function
        __lowerCamelCase = encoder_layers
        __lowerCamelCase = decoder_layers
        __lowerCamelCase = num_attention_heads
        __lowerCamelCase = dropout
        __lowerCamelCase = dim_feedforward
        __lowerCamelCase = pre_norm
        __lowerCamelCase = enforce_input_projection
        __lowerCamelCase = common_stride
        __lowerCamelCase = ignore_value
        __lowerCamelCase = num_queries
        __lowerCamelCase = no_object_weight
        __lowerCamelCase = class_weight
        __lowerCamelCase = mask_weight
        __lowerCamelCase = dice_weight
        __lowerCamelCase = train_num_points
        __lowerCamelCase = oversample_ratio
        __lowerCamelCase = importance_sample_ratio
        __lowerCamelCase = init_std
        __lowerCamelCase = init_xavier_std
        __lowerCamelCase = use_auxiliary_loss
        __lowerCamelCase = feature_strides
        __lowerCamelCase = output_auxiliary_logits
        __lowerCamelCase = decoder_layers
        super().__init__(**SCREAMING_SNAKE_CASE__)

    @classmethod
    def __A(cls: Any, SCREAMING_SNAKE_CASE__: PretrainedConfig, **SCREAMING_SNAKE_CASE__: Optional[Any]) -> List[Any]:
        return cls(backbone_config=SCREAMING_SNAKE_CASE__, **SCREAMING_SNAKE_CASE__, )

    def __A(self: Any) -> Dict[str, any]:
        __lowerCamelCase = copy.deepcopy(self.__dict__)
        __lowerCamelCase = self.backbone_config.to_dict()
        __lowerCamelCase = self.__class__.model_type
        return output
270
0
"""simple docstring""" import inspect import os import unittest import torch import accelerate from accelerate import debug_launcher from accelerate.test_utils import ( execute_subprocess_async, require_cpu, require_huggingface_suite, require_multi_gpu, require_single_gpu, ) from accelerate.utils import patch_environment @require_huggingface_suite class lowerCamelCase ( unittest.TestCase ): def a_ ( self ): UpperCamelCase : List[Any] = inspect.getfile(accelerate.test_utils ) UpperCamelCase : Union[str, Any] = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ["""scripts""", """external_deps""", """test_metrics.py"""] ) from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401 UpperCamelCase : Optional[Any] = test_metrics @require_cpu def a_ ( self ): debug_launcher(self.test_metrics.main , num_processes=1 ) @require_cpu def a_ ( self ): debug_launcher(self.test_metrics.main ) @require_single_gpu def a_ ( self ): self.test_metrics.main() @require_multi_gpu def a_ ( self ): print(f'Found {torch.cuda.device_count()} devices.' ) UpperCamelCase : Union[str, Any] = ["""torchrun""", f'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() )
27
"""simple docstring""" import gc import unittest from diffusers import FlaxStableDiffusionInpaintPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class lowerCamelCase ( unittest.TestCase ): def a_ ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() def a_ ( self ): UpperCamelCase : Tuple = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-inpaint/init_image.png""" ) UpperCamelCase : int = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" ) UpperCamelCase : Dict = """xvjiarui/stable-diffusion-2-inpainting""" UpperCamelCase , UpperCamelCase : List[str] = FlaxStableDiffusionInpaintPipeline.from_pretrained(SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[int] = """Face of a yellow cat, high resolution, sitting on a park bench""" UpperCamelCase : List[str] = jax.random.PRNGKey(0 ) UpperCamelCase : Tuple = 50 UpperCamelCase : Dict = jax.device_count() UpperCamelCase : Optional[int] = num_samples * [prompt] UpperCamelCase : int = num_samples * [init_image] UpperCamelCase : List[Any] = num_samples * [mask_image] UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[Any] = pipeline.prepare_inputs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # shard inputs and rng UpperCamelCase : Optional[int] = replicate(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[int] = jax.random.split(SCREAMING_SNAKE_CASE_ , jax.device_count() ) UpperCamelCase : str = shard(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Union[str, Any] = shard(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : int = shard(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[Any] = pipeline( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , jit=SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = output.images.reshape(SCREAMING_SNAKE_CASE_ , 512 , 512 , 3 ) UpperCamelCase : List[Any] = images[0, 253:256, 253:256, -1] UpperCamelCase : List[Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) UpperCamelCase : Dict = jnp.array( [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] ) print(f'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
27
1
def _snake_case(SCREAMING_SNAKE_CASE__: int = 4000000) -> int:
    '''simple docstring'''
    A__ = [0, 1]
    A__ = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    A__ = 0
    for j in range(len(SCREAMING_SNAKE_CASE__) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total


if __name__ == "__main__":
    print(f"""{solution() = }""")
7
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class lowercase__(_UpperCAmelCase):
    a_ = ["""image_processor""", """tokenizer"""]
    a_ = """LayoutLMv2ImageProcessor"""
    a_ = ("""LayoutXLMTokenizer""", """LayoutXLMTokenizerFast""")

    def __init__(self, __UpperCAmelCase=None, __UpperCAmelCase=None, **__UpperCAmelCase) -> Tuple:
        '''simple docstring'''
        if "feature_extractor" in kwargs:
            warnings.warn("The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead.", __UpperCAmelCase, )
            lowerCAmelCase__ = kwargs.pop("feature_extractor")
        lowerCAmelCase__ = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(__UpperCAmelCase, __UpperCAmelCase)

    def __call__(self, __UpperCAmelCase, __UpperCAmelCase = None, __UpperCAmelCase = None, __UpperCAmelCase = None, __UpperCAmelCase = None, __UpperCAmelCase = True, __UpperCAmelCase = False, __UpperCAmelCase = None, __UpperCAmelCase = None, __UpperCAmelCase = 0, __UpperCAmelCase = None, __UpperCAmelCase = None, __UpperCAmelCase = None, __UpperCAmelCase = False, __UpperCAmelCase = False, __UpperCAmelCase = False, __UpperCAmelCase = False, __UpperCAmelCase = True, __UpperCAmelCase = None, **__UpperCAmelCase, ) -> BatchEncoding:
        '''simple docstring'''
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError("You cannot provide bounding boxes " "if you initialized the image processor with apply_ocr set to True.")
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError("You cannot provide word labels if you initialized the image processor with apply_ocr set to True.")
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        lowerCAmelCase__ = self.image_processor(images=__UpperCAmelCase, return_tensors=__UpperCAmelCase)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(__UpperCAmelCase, __UpperCAmelCase):
                lowerCAmelCase__ = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            lowerCAmelCase__ = features["words"]

        lowerCAmelCase__ = self.tokenizer(text=text if text is not None else features["words"], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features["boxes"], word_labels=__UpperCAmelCase, add_special_tokens=__UpperCAmelCase, padding=__UpperCAmelCase, truncation=__UpperCAmelCase, max_length=__UpperCAmelCase, stride=__UpperCAmelCase, pad_to_multiple_of=__UpperCAmelCase, return_token_type_ids=__UpperCAmelCase, return_attention_mask=__UpperCAmelCase, return_overflowing_tokens=__UpperCAmelCase, return_special_tokens_mask=__UpperCAmelCase, return_offsets_mapping=__UpperCAmelCase, return_length=__UpperCAmelCase, verbose=__UpperCAmelCase, return_tensors=__UpperCAmelCase, **__UpperCAmelCase, )

        # add pixel values
        lowerCAmelCase__ = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            lowerCAmelCase__ = self.get_overflowing_images(__UpperCAmelCase, encoded_inputs["overflow_to_sample_mapping"])
        lowerCAmelCase__ = images
        return encoded_inputs

    def UpperCAmelCase(self, __UpperCAmelCase, __UpperCAmelCase) -> str:
        '''simple docstring'''
        lowerCAmelCase__ = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])
        if len(__UpperCAmelCase) != len(__UpperCAmelCase):
            raise ValueError("Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got" F" {len(__UpperCAmelCase)} and {len(__UpperCAmelCase)}")
        return images_with_overflow

    def UpperCAmelCase(self, *__UpperCAmelCase, **__UpperCAmelCase) -> Union[str, Any]:
        '''simple docstring'''
        return self.tokenizer.batch_decode(*__UpperCAmelCase, **__UpperCAmelCase)

    def UpperCAmelCase(self, *__UpperCAmelCase, **__UpperCAmelCase) -> Dict:
        '''simple docstring'''
        return self.tokenizer.decode(*__UpperCAmelCase, **__UpperCAmelCase)

    @property
    def UpperCAmelCase(self) -> Optional[int]:
        '''simple docstring'''
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def UpperCAmelCase(self) -> Union[str, Any]:
        '''simple docstring'''
        warnings.warn("`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", __UpperCAmelCase, )
        return self.image_processor_class

    @property
    def UpperCAmelCase(self) -> str:
        '''simple docstring'''
        warnings.warn("`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", __UpperCAmelCase, )
        return self.image_processor
340
0
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


lowerCAmelCase_ = {
    'configuration_chinese_clip': [
        'CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'ChineseCLIPConfig',
        'ChineseCLIPOnnxConfig',
        'ChineseCLIPTextConfig',
        'ChineseCLIPVisionConfig',
    ],
    'processing_chinese_clip': ['ChineseCLIPProcessor'],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase_ = ['ChineseCLIPFeatureExtractor']
    lowerCAmelCase_ = ['ChineseCLIPImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase_ = [
        'CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ChineseCLIPModel',
        'ChineseCLIPPreTrainedModel',
        'ChineseCLIPTextModel',
        'ChineseCLIPVisionModel',
    ]

if TYPE_CHECKING:
    from .configuration_chinese_clip import (
        CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ChineseCLIPConfig,
        ChineseCLIPOnnxConfig,
        ChineseCLIPTextConfig,
        ChineseCLIPVisionConfig,
    )
    from .processing_chinese_clip import ChineseCLIPProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_chinese_clip import (
            CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ChineseCLIPModel,
            ChineseCLIPPreTrainedModel,
            ChineseCLIPTextModel,
            ChineseCLIPVisionModel,
        )

else:
    import sys

    lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
368
'''simple docstring'''
from ..utils import DummyObject, requires_backends


class lowerCamelCase(metaclass=__lowerCAmelCase):
    snake_case_ = ['''note_seq''']

    def __init__(self, *lowercase_, **lowercase_) -> str:
        requires_backends(self, ['note_seq'])

    @classmethod
    def _lowerCamelCase(cls, *lowercase_, **lowercase_) -> Union[str, Any]:
        requires_backends(cls, ['note_seq'])

    @classmethod
    def _lowerCamelCase(cls, *lowercase_, **lowercase_) -> List[Any]:
        requires_backends(cls, ['note_seq'])
332
0
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_barthez import BarthezTokenizer
else:
    lowerCAmelCase: Tuple = None

lowerCAmelCase: List[Any] = logging.get_logger(__name__)

lowerCAmelCase: Any = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}

lowerCAmelCase: Dict = {
    '''vocab_file''': {
        '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
        '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
        '''moussaKam/barthez-orangesum-title''': (
            '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
        ),
    },
    '''tokenizer_file''': {
        '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
        '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
        '''moussaKam/barthez-orangesum-title''': (
            '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
        ),
    },
}

lowerCAmelCase: Tuple = {
    '''moussaKam/mbarthez''': 1_024,
    '''moussaKam/barthez''': 1_024,
    '''moussaKam/barthez-orangesum-title''': 1_024,
}

lowerCAmelCase: Tuple = '''▁'''


class a_(_lowerCAmelCase):
    __A = VOCAB_FILES_NAMES
    __A = PRETRAINED_VOCAB_FILES_MAP
    __A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __A = ["input_ids", "attention_mask"]
    __A = BarthezTokenizer

    def __init__(self: List[str], lowercase: Tuple=None, lowercase: Any=None, lowercase: Optional[int]="<s>", lowercase: Union[str, Any]="</s>", lowercase: List[Any]="</s>", lowercase: List[Any]="<s>", lowercase: Optional[Any]="<unk>", lowercase: str="<pad>", lowercase: Optional[int]="<mask>", **lowercase: Optional[Any], ):
        """simple docstring"""
        lowercase_: str = AddedToken(lowercase, lstrip=lowercase, rstrip=lowercase) if isinstance(lowercase, lowercase) else mask_token
        super().__init__(lowercase, tokenizer_file=lowercase, bos_token=lowercase, eos_token=lowercase, unk_token=lowercase, sep_token=lowercase, cls_token=lowercase, pad_token=lowercase, mask_token=lowercase, **lowercase, )
        lowercase_: Union[str, Any] = vocab_file
        lowercase_: Optional[Any] = False if not self.vocab_file else True

    def lowercase__(self: Union[str, Any], lowercase: List[int], lowercase: Optional[List[int]] = None):
        """simple docstring"""
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        lowercase_: Any = [self.cls_token_id]
        lowercase_: Any = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def lowercase__(self: Optional[int], lowercase: List[int], lowercase: Optional[List[int]] = None):
        """simple docstring"""
        lowercase_: Optional[Any] = [self.sep_token_id]
        lowercase_: Tuple = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]

    def lowercase__(self: int, lowercase: str, lowercase: Optional[str] = None):
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(lowercase):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory')
            return
        lowercase_: Optional[int] = os.path.join(lowercase, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(lowercase):
            copyfile(self.vocab_file, lowercase)
        return (out_vocab_file,)
223
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


lowerCAmelCase: Optional[Any] = {
    '''configuration_nllb_moe''': [
        '''NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''NllbMoeConfig''',
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase: Tuple = [
        '''NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''NllbMoeForConditionalGeneration''',
        '''NllbMoeModel''',
        '''NllbMoePreTrainedModel''',
        '''NllbMoeTop2Router''',
        '''NllbMoeSparseMLP''',
    ]

if TYPE_CHECKING:
    from .configuration_nllb_moe import (
        NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
        NllbMoeConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTopaRouter,
        )

else:
    import sys

    lowerCAmelCase: Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
223
1
from __future__ import annotations import random import unittest from transformers import TransfoXLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLModel, ) class _snake_case : def __init__( self: List[str] , __lowerCamelCase: int , ) -> Tuple: __UpperCAmelCase : Optional[int] = parent __UpperCAmelCase : Any = 13 __UpperCAmelCase : List[Any] = 7 __UpperCAmelCase : Tuple = 30 __UpperCAmelCase : Union[str, Any] = self.seq_length + self.mem_len __UpperCAmelCase : List[Any] = 15 __UpperCAmelCase : Union[str, Any] = True __UpperCAmelCase : str = True __UpperCAmelCase : List[Any] = 99 __UpperCAmelCase : Any = [10, 50, 80] __UpperCAmelCase : Union[str, Any] = 32 __UpperCAmelCase : List[str] = 32 __UpperCAmelCase : int = 4 __UpperCAmelCase : List[Any] = 8 __UpperCAmelCase : Union[str, Any] = 1_28 __UpperCAmelCase : str = 2 __UpperCAmelCase : str = 2 __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : Dict = 1 __UpperCAmelCase : Dict = 0 __UpperCAmelCase : Union[str, Any] = 3 __UpperCAmelCase : Optional[Any] = self.vocab_size - 1 __UpperCAmelCase : Union[str, Any] = 0.01 def _lowerCamelCase ( self: Union[str, Any] ) -> Optional[int]: __UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : List[str] = None if self.use_labels: __UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : str = TransfoXLConfig( vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , ) return (config, input_ids_a, input_ids_a, lm_labels) def _lowerCamelCase ( self: str ) -> List[Any]: random.seed(self.seed ) tf.random.set_seed(self.seed ) def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: Tuple , __lowerCamelCase: Any , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: int ) -> Any: __UpperCAmelCase : Optional[int] = TFTransfoXLModel(a__ ) __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = model(a__ ).to_tuple() __UpperCAmelCase : Union[str, Any] = {"input_ids": input_ids_a, "mems": mems_a} __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = model(a__ ).to_tuple() self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: int , __lowerCamelCase: List[str] , 
__lowerCamelCase: List[str] , __lowerCamelCase: Dict ) -> int: __UpperCAmelCase : Optional[Any] = TFTransfoXLLMHeadModel(a__ ) __UpperCAmelCase , __UpperCAmelCase : List[Any] = model(a__ ).to_tuple() __UpperCAmelCase : Optional[int] = {"input_ids": input_ids_a, "labels": lm_labels} __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = model(a__ ).to_tuple() __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = model([input_ids_a, mems_a] ).to_tuple() __UpperCAmelCase : Union[str, Any] = {"input_ids": input_ids_a, "mems": mems_a, "labels": lm_labels} __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = model(a__ ).to_tuple() self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: str , __lowerCamelCase: Optional[int] , __lowerCamelCase: Dict , __lowerCamelCase: List[Any] ) -> Union[str, Any]: __UpperCAmelCase : Optional[Any] = TFTransfoXLForSequenceClassification(a__ ) __UpperCAmelCase : Optional[Any] = model(a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowerCamelCase ( self: List[str] ) -> Optional[Any]: __UpperCAmelCase : Optional[Any] = self.prepare_config_and_inputs() ((__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase)) : Optional[int] = config_and_inputs __UpperCAmelCase : Optional[int] = {"input_ids": input_ids_a} return config, inputs_dict @require_tf class _snake_case ( __a , __a , unittest.TestCase ): lowerCamelCase__: Optional[int] = ( (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () ) lowerCamelCase__: Optional[Any] = () if is_tf_available() else () lowerCamelCase__: Any = ( { """feature-extraction""": TFTransfoXLModel, """text-classification""": TFTransfoXLForSequenceClassification, """text-generation""": TFTransfoXLLMHeadModel, """zero-shot""": TFTransfoXLForSequenceClassification, } if is_tf_available() else {} ) # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented lowerCamelCase__: List[str] = False lowerCamelCase__: str = False lowerCamelCase__: str = False lowerCamelCase__: List[Any] = False def _lowerCamelCase ( self: int , __lowerCamelCase: int , __lowerCamelCase: List[str] , __lowerCamelCase: Any , __lowerCamelCase: Optional[Any] , __lowerCamelCase: int ) -> int: if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. 
return True return False def _lowerCamelCase ( self: int ) -> Optional[Any]: __UpperCAmelCase : Union[str, Any] = TFTransfoXLModelTester(self ) __UpperCAmelCase : List[Any] = ConfigTester(self , config_class=a__ , d_embed=37 ) def _lowerCamelCase ( self: Optional[Any] ) -> Union[str, Any]: self.config_tester.run_common_tests() def _lowerCamelCase ( self: int ) -> str: self.model_tester.set_seed() __UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_model(*a__ ) def _lowerCamelCase ( self: Optional[int] ) -> Tuple: self.model_tester.set_seed() __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*a__ ) def _lowerCamelCase ( self: Any ) -> int: __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*a__ ) def _lowerCamelCase ( self: Optional[int] ) -> int: __UpperCAmelCase , __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Any = [TFTransfoXLForSequenceClassification] for model_class in self.all_model_classes: __UpperCAmelCase : Union[str, Any] = model_class(a__ ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class in list_other_models_with_output_ebd: __UpperCAmelCase : Union[str, Any] = model.get_output_embeddings() assert isinstance(a__ , tf.keras.layers.Layer ) __UpperCAmelCase : List[str] = model.get_bias() assert name is None else: __UpperCAmelCase : int = model.get_output_embeddings() assert x is None __UpperCAmelCase : List[Any] = model.get_bias() assert name is None def _lowerCamelCase ( self: Dict ) -> str: # TODO JP: Make TransfoXL XLA compliant pass @slow def _lowerCamelCase ( self: List[Any] ) -> str: for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : Union[str, Any] = TFTransfoXLModel.from_pretrained(a__ ) self.assertIsNotNone(a__ ) @unittest.skip(reason="This model doesn\'t play well with fit() due to not returning a single loss." ) def _lowerCamelCase ( self: Any ) -> List[str]: pass @require_tf class _snake_case ( unittest.TestCase ): @unittest.skip("Skip test until #12651 is resolved." ) @slow def _lowerCamelCase ( self: int ) -> Tuple: __UpperCAmelCase : Dict = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103" ) # fmt: off __UpperCAmelCase : Union[str, Any] = tf.convert_to_tensor([[33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . 
# Rasputin has a vision and denounces one of the men as a horse thief . Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . <eod> </s> <eos> # fmt: off __UpperCAmelCase : Dict = [33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0,33,1,18_57,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,28,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,0] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( # except for Alexei and Maria ) are discovered. The voice of young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. # 1883 Western Siberia, a young Grigori Rasputin is asked by his father # and a group of men to perform magic. Rasputin has a vision and # denounces one of the men as a horse thief. Although his father initially # slaps him for making such an accusation, Rasputin watches as the man # is chased outside and beaten. Twenty years later, Rasputin sees a vision # of the Virgin Mary, prompting him to become a priest. # Rasputin quickly becomes famous, with people, even a bishop, begging for # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar # Nicholas II and his family were discovered. The voice of <unk> young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos> __UpperCAmelCase : Union[str, Any] = model.generate(a__ , max_length=2_00 , do_sample=a__ ) self.assertListEqual(output_ids[0].numpy().tolist() , a__ )
360
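# --- Illustrative aside (not a dataset row): a minimal, hedged sketch of driving the
# TFTransfoXL generation path exercised by the test chunk above. It assumes the
# `transformers` and `tensorflow` packages are installed; "transfo-xl-wt103" is the
# checkpoint the test itself names, while the prompt text is a placeholder.
import tensorflow as tf
from transformers import TFTransfoXLLMHeadModel, TransfoXLTokenizer

tokenizer = TransfoXLTokenizer.from_pretrained("transfo-xl-wt103")
model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")

input_ids = tf.convert_to_tensor([tokenizer.encode("In 1991 , the remains of")], dtype=tf.int32)
# Greedy decoding, mirroring do_sample=False in the slow test above.
output_ids = model.generate(input_ids, max_length=40, do_sample=False)
print(tokenizer.decode(output_ids[0].numpy().tolist()))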
import os from typing import List, Optional, Union from ...image_processing_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType from ..auto import AutoTokenizer class _snake_case ( _lowercase ): lowerCamelCase__: Any = ["image_processor", "tokenizer"] lowerCamelCase__: Optional[Any] = "BlipImageProcessor" lowerCamelCase__: Optional[int] = "AutoTokenizer" def __init__( self: List[str] , __lowerCamelCase: str , __lowerCamelCase: List[str] , __lowerCamelCase: Optional[Any] ) -> Dict: super().__init__(__lowerCamelCase , __lowerCamelCase ) # add QFormer tokenizer __UpperCAmelCase : Dict = qformer_tokenizer def __call__( self: Any , __lowerCamelCase: ImageInput = None , __lowerCamelCase: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __lowerCamelCase: bool = True , __lowerCamelCase: Union[bool, str, PaddingStrategy] = False , __lowerCamelCase: Union[bool, str, TruncationStrategy] = None , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: int = 0 , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: Optional[bool] = None , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = True , __lowerCamelCase: Optional[Union[str, TensorType]] = None , **__lowerCamelCase: Dict , ) -> BatchFeature: if images is None and text is None: raise ValueError("You have to specify at least images or text." ) __UpperCAmelCase : str = BatchFeature() if text is not None: __UpperCAmelCase : Any = self.tokenizer( text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , ) encoding.update(__lowerCamelCase ) __UpperCAmelCase : Dict = self.qformer_tokenizer( text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , ) __UpperCAmelCase : int = qformer_text_encoding.pop("input_ids" ) __UpperCAmelCase : Optional[int] = qformer_text_encoding.pop("attention_mask" ) if images is not None: __UpperCAmelCase : Union[str, Any] = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase ) encoding.update(__lowerCamelCase ) return encoding def _lowerCamelCase ( self: Any , *__lowerCamelCase: Any , **__lowerCamelCase: Any ) -> Optional[Any]: return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase ) def _lowerCamelCase ( self: Tuple , *__lowerCamelCase: Any , 
**__lowerCamelCase: Dict ) -> Tuple: return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def _lowerCamelCase ( self: List[str] ) -> Tuple: __UpperCAmelCase : str = self.tokenizer.model_input_names __UpperCAmelCase : Dict = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: Union[str, Any] , **__lowerCamelCase: Optional[Any] ) -> str: if os.path.isfile(__lowerCamelCase ): raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''' ) os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase ) __UpperCAmelCase : List[str] = os.path.join(__lowerCamelCase , "qformer_tokenizer" ) self.qformer_tokenizer.save_pretrained(__lowerCamelCase ) return super().save_pretrained(__lowerCamelCase , **__lowerCamelCase ) @classmethod def _lowerCamelCase ( cls: Tuple , __lowerCamelCase: Tuple , **__lowerCamelCase: Optional[int] ) -> Union[str, Any]: __UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained(__lowerCamelCase , subfolder="qformer_tokenizer" ) __UpperCAmelCase : List[Any] = cls._get_arguments_from_pretrained(__lowerCamelCase , **__lowerCamelCase ) args.append(__lowerCamelCase ) return cls(*__lowerCamelCase )
342
0
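# --- Illustrative aside (not a dataset row): a hedged sketch of how a three-part
# processor like the style_context above (image processor, main tokenizer, Q-Former
# tokenizer) is typically used. The checkpoint name and image URL are placeholders,
# not taken from the chunk itself.
import requests
from PIL import Image
from transformers import InstructBlipProcessor

processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
image = Image.open(requests.get("https://example.com/cat.png", stream=True).raw)
inputs = processor(images=image, text="What is in the picture?", return_tensors="pt")
# The returned BatchFeature carries pixel_values plus both tokenizations, including
# the renamed qformer_input_ids / qformer_attention_mask built in __call__ above.
print(sorted(inputs.keys()))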
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING __snake_case : List[Any] =logging.get_logger(__name__) __snake_case : str ={ """microsoft/table-transformer-detection""": ( """https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json""" ), } class lowerCamelCase__ ( lowerCamelCase__): '''simple docstring''' snake_case_ ="""table-transformer""" snake_case_ =["""past_key_values"""] snake_case_ ={ """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", } def __init__(self ,__lowerCamelCase=True ,__lowerCamelCase=None ,__lowerCamelCase=3 ,__lowerCamelCase=1_00 ,__lowerCamelCase=6 ,__lowerCamelCase=20_48 ,__lowerCamelCase=8 ,__lowerCamelCase=6 ,__lowerCamelCase=20_48 ,__lowerCamelCase=8 ,__lowerCamelCase=0.0 ,__lowerCamelCase=0.0 ,__lowerCamelCase=True ,__lowerCamelCase="relu" ,__lowerCamelCase=2_56 ,__lowerCamelCase=0.1 ,__lowerCamelCase=0.0 ,__lowerCamelCase=0.0 ,__lowerCamelCase=0.02 ,__lowerCamelCase=1.0 ,__lowerCamelCase=False ,__lowerCamelCase="sine" ,__lowerCamelCase="resnet50" ,__lowerCamelCase=True ,__lowerCamelCase=False ,__lowerCamelCase=1 ,__lowerCamelCase=5 ,__lowerCamelCase=2 ,__lowerCamelCase=1 ,__lowerCamelCase=1 ,__lowerCamelCase=5 ,__lowerCamelCase=2 ,__lowerCamelCase=0.1 ,**__lowerCamelCase ,) -> Dict: """simple docstring""" if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) lowerCAmelCase__ : str = CONFIG_MAPPING["""resnet"""](out_features=['''stage4'''] ) elif isinstance(UpperCAmelCase__ ,UpperCAmelCase__ ): lowerCAmelCase__ : Optional[int] = backbone_config.get('''model_type''' ) lowerCAmelCase__ : List[str] = CONFIG_MAPPING[backbone_model_type] lowerCAmelCase__ : Optional[Any] = config_class.from_dict(UpperCAmelCase__ ) # set timm attributes to None lowerCAmelCase__ : Union[str, Any] = None, None, None lowerCAmelCase__ : List[Any] = use_timm_backbone lowerCAmelCase__ : List[Any] = backbone_config lowerCAmelCase__ : Dict = num_channels lowerCAmelCase__ : Union[str, Any] = num_queries lowerCAmelCase__ : Optional[Any] = d_model lowerCAmelCase__ : Any = encoder_ffn_dim lowerCAmelCase__ : str = encoder_layers lowerCAmelCase__ : List[str] = encoder_attention_heads lowerCAmelCase__ : Tuple = decoder_ffn_dim lowerCAmelCase__ : Optional[Any] = decoder_layers lowerCAmelCase__ : str = decoder_attention_heads lowerCAmelCase__ : List[Any] = dropout lowerCAmelCase__ : List[Any] = attention_dropout lowerCAmelCase__ : Optional[int] = activation_dropout lowerCAmelCase__ : Tuple = activation_function lowerCAmelCase__ : int = init_std lowerCAmelCase__ : str = init_xavier_std lowerCAmelCase__ : Tuple = encoder_layerdrop lowerCAmelCase__ : Union[str, Any] = decoder_layerdrop lowerCAmelCase__ : Optional[Any] = encoder_layers lowerCAmelCase__ : Any = auxiliary_loss lowerCAmelCase__ : List[Any] = position_embedding_type lowerCAmelCase__ : List[Any] = backbone lowerCAmelCase__ : Optional[Any] = use_pretrained_backbone lowerCAmelCase__ : Optional[Any] = dilation # Hungarian matcher lowerCAmelCase__ : List[Any] = class_cost lowerCAmelCase__ : Tuple = bbox_cost lowerCAmelCase__ : Dict = giou_cost 
# Loss coefficients lowerCAmelCase__ : Dict = mask_loss_coefficient lowerCAmelCase__ : Optional[Any] = dice_loss_coefficient lowerCAmelCase__ : Tuple = bbox_loss_coefficient lowerCAmelCase__ : List[Any] = giou_loss_coefficient lowerCAmelCase__ : List[Any] = eos_coefficient super().__init__(is_encoder_decoder=UpperCAmelCase__ ,**UpperCAmelCase__ ) @property def lowerCAmelCase__ (self ) -> int: """simple docstring""" return self.encoder_attention_heads @property def lowerCAmelCase__ (self ) -> int: """simple docstring""" return self.d_model class lowerCamelCase__ ( lowerCamelCase__): '''simple docstring''' snake_case_ =version.parse("""1.11""") @property def lowerCAmelCase__ (self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ('''pixel_mask''', {0: '''batch'''}), ] ) @property def lowerCAmelCase__ (self ) -> float: """simple docstring""" return 1e-5 @property def lowerCAmelCase__ (self ) -> int: """simple docstring""" return 12
129
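# --- Illustrative aside (not a dataset row): a minimal sketch of building the config
# from the code chunk above with an explicit (non-timm) ResNet backbone, mirroring the
# backbone_config branch in its __init__. Hyperparameter values are illustrative.
from transformers import ResNetConfig, TableTransformerConfig, TableTransformerModel

backbone_config = ResNetConfig(out_features=["stage4"])
config = TableTransformerConfig(
    use_timm_backbone=False,
    backbone_config=backbone_config,
    num_queries=100,
    d_model=256,
)
model = TableTransformerModel(config)  # randomly initialised, no download needed
# attribute_map routes hidden_size -> d_model and
# num_attention_heads -> encoder_attention_heads, as declared above.
print(config.hidden_size, config.num_attention_heads)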
import logging import math from functools import partial from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union import torch from .tensor_utils import tensor_tree_map, tree_map def __lowercase ( _A ) -> List[Tuple[int, ...]]: SCREAMING_SNAKE_CASE : Optional[int] = [] if isinstance(_A , _A ): for v in tree.values(): shapes.extend(_fetch_dims(_A ) ) elif isinstance(_A , (list, tuple) ): for t in tree: shapes.extend(_fetch_dims(_A ) ) elif isinstance(_A , torch.Tensor ): shapes.append(tree.shape ) else: raise ValueError("""Not supported""" ) return shapes @torch.jit.ignore def __lowercase ( _A , _A ) -> Tuple[int, ...]: SCREAMING_SNAKE_CASE : List[Any] = [] for d in reversed(_A ): idx.append(flat_idx % d ) SCREAMING_SNAKE_CASE : Tuple = flat_idx // d return tuple(reversed(_A ) ) @torch.jit.ignore def __lowercase ( _A , _A , _A , _A = None , _A = None , ) -> List[Tuple[slice, ...]]: # start_edges and end_edges both indicate whether, starting from any given # dimension, the start/end index is at the top/bottom edge of the # corresponding tensor, modeled as a tree def reduce_edge_list(_A ) -> None: SCREAMING_SNAKE_CASE : int = True for i in range(len(_A ) ): SCREAMING_SNAKE_CASE : Dict = -1 * (i + 1) l[reversed_idx] &= tally SCREAMING_SNAKE_CASE : Any = l[reversed_idx] if start_edges is None: SCREAMING_SNAKE_CASE : Tuple = [s == 0 for s in start] reduce_edge_list(_A ) if end_edges is None: SCREAMING_SNAKE_CASE : Tuple = [e == (d - 1) for e, d in zip(_A , _A )] reduce_edge_list(_A ) # Base cases. Either start/end are empty and we're done, or the final, # one-dimensional tensor can be simply sliced if len(_A ) == 0: return [()] elif len(_A ) == 1: return [(slice(start[0] , end[0] + 1 ),)] SCREAMING_SNAKE_CASE : List[Tuple[slice, ...]] = [] SCREAMING_SNAKE_CASE : List[slice] = [] # Dimensions common to start and end can be selected directly for s, e in zip(_A , _A ): if s == e: path_list.append(slice(_A , s + 1 ) ) else: break SCREAMING_SNAKE_CASE : Tuple[slice, ...] 
= tuple(_A ) SCREAMING_SNAKE_CASE : List[str] = len(_A ) # start == end, and we're done if divergence_idx == len(_A ): return [path] def upper() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None SCREAMING_SNAKE_CASE : List[str] = start[divergence_idx] return tuple( path + (slice(_A , sdi + 1 ),) + s for s in _get_minimal_slice_set( start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) ) def lower() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None SCREAMING_SNAKE_CASE : Tuple = end[divergence_idx] return tuple( path + (slice(_A , edi + 1 ),) + s for s in _get_minimal_slice_set( [0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) ) # If both start and end are at the edges of the subtree rooted at # divergence_idx, we can just select the whole subtree at once if start_edges[divergence_idx] and end_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) ) # If just start is at the edge, we can grab almost all of the subtree, # treating only the ragged bottom edge as an edge case elif start_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) ) slices.extend(lower() ) # Analogous to the previous case, but the top is ragged this time elif end_edges[divergence_idx]: slices.extend(upper() ) slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) ) # If both sides of the range are ragged, we need to handle both sides # separately. 
If there's contiguous meat in between them, we can index it # in one big chunk else: slices.extend(upper() ) SCREAMING_SNAKE_CASE : int = end[divergence_idx] - start[divergence_idx] if middle_ground > 1: slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) ) slices.extend(lower() ) return slices @torch.jit.ignore def __lowercase ( _A , _A , _A , _A ) -> torch.Tensor: SCREAMING_SNAKE_CASE : Tuple = t.shape[:no_batch_dims] SCREAMING_SNAKE_CASE : Union[str, Any] = list(_flat_idx_to_idx(_A , _A ) ) # _get_minimal_slice_set is inclusive SCREAMING_SNAKE_CASE : Any = list(_flat_idx_to_idx(flat_end - 1 , _A ) ) # Get an ordered list of slices to perform SCREAMING_SNAKE_CASE : List[Any] = _get_minimal_slice_set( _A , _A , _A , ) SCREAMING_SNAKE_CASE : List[Any] = [t[s] for s in slices] return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] ) def __lowercase ( _A , _A , _A , _A , _A = False , _A = None , _A = False , ) -> Any: if not (len(_A ) > 0): raise ValueError("""Must provide at least one input""" ) SCREAMING_SNAKE_CASE : Tuple = [shape[:no_batch_dims] for shape in _fetch_dims(_A )] SCREAMING_SNAKE_CASE : str = tuple([max(_A ) for s in zip(*_A )] ) def _prep_inputs(_A ) -> torch.Tensor: if not low_mem: if not sum(t.shape[:no_batch_dims] ) == no_batch_dims: SCREAMING_SNAKE_CASE : List[Any] = t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) SCREAMING_SNAKE_CASE : Union[str, Any] = t.reshape(-1 , *t.shape[no_batch_dims:] ) else: SCREAMING_SNAKE_CASE : Optional[Any] = t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) return t SCREAMING_SNAKE_CASE : Dict[str, Any] = tensor_tree_map(_prep_inputs , _A ) SCREAMING_SNAKE_CASE : Optional[int] = None if _out is not None: SCREAMING_SNAKE_CASE : Optional[int] = tensor_tree_map(lambda _A : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out ) SCREAMING_SNAKE_CASE : Optional[int] = 1 for d in orig_batch_dims: flat_batch_dim *= d SCREAMING_SNAKE_CASE : Tuple = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0) def _select_chunk(_A ) -> torch.Tensor: return t[i : i + chunk_size] if t.shape[0] != 1 else t SCREAMING_SNAKE_CASE : Union[str, Any] = 0 SCREAMING_SNAKE_CASE : Union[str, Any] = prepped_outputs for _ in range(_A ): # Chunk the input if not low_mem: SCREAMING_SNAKE_CASE : int = _select_chunk else: SCREAMING_SNAKE_CASE : Optional[int] = partial( _chunk_slice , flat_start=_A , flat_end=min(_A , i + chunk_size ) , no_batch_dims=len(_A ) , ) SCREAMING_SNAKE_CASE : Dict[str, Any] = tensor_tree_map(_A , _A ) # Run the layer on the chunk SCREAMING_SNAKE_CASE : Tuple = layer(**_A ) # Allocate space for the output if out is None: SCREAMING_SNAKE_CASE : List[str] = tensor_tree_map(lambda _A : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , _A ) # Put the chunk in its pre-allocated space if isinstance(_A , _A ): def assign(_A , _A ) -> None: for k, v in da.items(): if isinstance(_A , _A ): assign(_A , da[k] ) else: if _add_into_out: v[i : i + chunk_size] += da[k] else: SCREAMING_SNAKE_CASE : Optional[Any] = da[k] assign(_A , _A ) elif isinstance(_A , _A ): for xa, xa in zip(_A , _A ): if _add_into_out: xa[i : i + chunk_size] += xa else: SCREAMING_SNAKE_CASE : str = xa elif isinstance(_A , torch.Tensor ): if _add_into_out: out[i : i + chunk_size] += output_chunk else: SCREAMING_SNAKE_CASE : List[Any] = output_chunk else: raise ValueError("""Not supported""" ) i += chunk_size SCREAMING_SNAKE_CASE : Any = tensor_tree_map(lambda _A : t.view(orig_batch_dims + t.shape[1:] ) , _A ) return out 
class a__ : """simple docstring""" def __init__( self : Optional[Any] , UpperCAmelCase__ : int = 5_1_2 , ) ->int: """simple docstring""" SCREAMING_SNAKE_CASE : str = max_chunk_size SCREAMING_SNAKE_CASE : Optional[int] = None SCREAMING_SNAKE_CASE : Optional[tuple] = None def _lowercase ( self : List[Any] , UpperCAmelCase__ : Callable , UpperCAmelCase__ : tuple , UpperCAmelCase__ : int ) ->int: """simple docstring""" logging.info("""Tuning chunk size...""" ) if min_chunk_size >= self.max_chunk_size: return min_chunk_size SCREAMING_SNAKE_CASE : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )] SCREAMING_SNAKE_CASE : Dict = [c for c in candidates if c > min_chunk_size] SCREAMING_SNAKE_CASE : List[str] = [min_chunk_size] + candidates candidates[-1] += 4 def test_chunk_size(UpperCAmelCase__ : int ) -> bool: try: with torch.no_grad(): fn(*UpperCAmelCase__ , chunk_size=UpperCAmelCase__ ) return True except RuntimeError: return False SCREAMING_SNAKE_CASE : List[str] = 0 SCREAMING_SNAKE_CASE : List[str] = len(UpperCAmelCase__ ) - 1 while i > min_viable_chunk_size_index: SCREAMING_SNAKE_CASE : int = test_chunk_size(candidates[i] ) if not viable: SCREAMING_SNAKE_CASE : Tuple = (min_viable_chunk_size_index + i) // 2 else: SCREAMING_SNAKE_CASE : List[str] = i SCREAMING_SNAKE_CASE : List[str] = (i + len(UpperCAmelCase__ ) - 1) // 2 return candidates[min_viable_chunk_size_index] def _lowercase ( self : List[Any] , UpperCAmelCase__ : Iterable , UpperCAmelCase__ : Iterable ) ->bool: """simple docstring""" SCREAMING_SNAKE_CASE : str = True for aa, aa in zip(UpperCAmelCase__ , UpperCAmelCase__ ): assert type(UpperCAmelCase__ ) == type(UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , (list, tuple) ): consistent &= self._compare_arg_caches(UpperCAmelCase__ , UpperCAmelCase__ ) elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): SCREAMING_SNAKE_CASE : Optional[Any] = [v for _, v in sorted(aa.items() , key=lambda UpperCAmelCase__ : x[0] )] SCREAMING_SNAKE_CASE : List[str] = [v for _, v in sorted(aa.items() , key=lambda UpperCAmelCase__ : x[0] )] consistent &= self._compare_arg_caches(UpperCAmelCase__ , UpperCAmelCase__ ) else: consistent &= aa == aa return consistent def _lowercase ( self : List[str] , UpperCAmelCase__ : Callable , UpperCAmelCase__ : tuple , UpperCAmelCase__ : int , ) ->int: """simple docstring""" SCREAMING_SNAKE_CASE : int = True SCREAMING_SNAKE_CASE : tuple = tree_map(lambda UpperCAmelCase__ : a.shape if isinstance(UpperCAmelCase__ , torch.Tensor ) else a , UpperCAmelCase__ , UpperCAmelCase__ ) if self.cached_arg_data is not None: # If args have changed shape/value, we need to re-tune assert len(self.cached_arg_data ) == len(UpperCAmelCase__ ) SCREAMING_SNAKE_CASE : Optional[Any] = self._compare_arg_caches(self.cached_arg_data , UpperCAmelCase__ ) else: # Otherwise, we can reuse the precomputed value SCREAMING_SNAKE_CASE : List[Any] = False if not consistent: SCREAMING_SNAKE_CASE : List[Any] = self._determine_favorable_chunk_size( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , ) SCREAMING_SNAKE_CASE : Union[str, Any] = arg_data assert self.cached_chunk_size is not None return self.cached_chunk_size
245
0
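# --- Illustrative aside (not a dataset row): a simplified, self-contained stand-in
# for the chunked-layer helper in the style_context above: flatten the batch dims,
# apply the layer chunk by chunk to bound peak memory, then restore the batch shape.
# The real helper additionally handles dict/tuple input trees and a low-memory
# expansion path; this sketch covers only the single-tensor case.
import torch


def chunked_apply(fn, t: torch.Tensor, chunk_size: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    flat = t.reshape(-1, *t.shape[no_batch_dims:])  # flatten the batch dims
    out = torch.cat(
        [fn(flat[i : i + chunk_size]) for i in range(0, flat.shape[0], chunk_size)]
    )
    return out.reshape(*batch_dims, *out.shape[1:])  # restore the batch dims


x = torch.randn(3, 5, 8)
y = chunked_apply(lambda c: c * 2 + 1, x, chunk_size=4, no_batch_dims=2)
assert y.shape == (3, 5, 8) and torch.allclose(y, x * 2 + 1)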
"""simple docstring""" import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets __SCREAMING_SNAKE_CASE : Dict = datasets.logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Any = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n' __SCREAMING_SNAKE_CASE : Any = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. 
Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n' __SCREAMING_SNAKE_CASE : Dict = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. 
By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n' def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE="dummy_doc" ) -> Tuple: snake_case_ = {doc: key_lines} snake_case_ = {doc: sys_lines} snake_case_ = {} snake_case_ = 0 snake_case_ = 0 snake_case_ = 0 snake_case_ = 0 snake_case_ = 0 snake_case_ = 0 snake_case_ , snake_case_ = reader.get_doc_mentions(_SCREAMING_SNAKE_CASE , key_doc_lines[doc] , _SCREAMING_SNAKE_CASE ) key_singletons_num += singletons_num if NP_only or min_span: snake_case_ = reader.set_annotated_parse_trees(_SCREAMING_SNAKE_CASE , key_doc_lines[doc] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ , snake_case_ = reader.get_doc_mentions(_SCREAMING_SNAKE_CASE , sys_doc_lines[doc] , _SCREAMING_SNAKE_CASE ) sys_singletons_num += singletons_num if NP_only or min_span: snake_case_ = reader.set_annotated_parse_trees(_SCREAMING_SNAKE_CASE , key_doc_lines[doc] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if remove_nested: snake_case_ , snake_case_ = reader.remove_nested_coref_mentions(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters snake_case_ , snake_case_ = reader.remove_nested_coref_mentions(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters snake_case_ = reader.get_mention_assignments(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = reader.get_mention_assignments(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( """Number of removed nested coreferring mentions in the key """ f"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" ) logger.info( """Number of resulting singleton clusters in the key """ f"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" ) if not keep_singletons: logger.info( f"""{key_singletons_num:d} and {sys_singletons_num:d} singletons 
are removed from the key and system """ """files, respectively""" ) return doc_coref_infos def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: snake_case_ = get_coref_infos(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case_ = {} snake_case_ = 0 snake_case_ = 0 for name, metric in metrics: snake_case_ , snake_case_ , snake_case_ = evaluator.evaluate_documents(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , beta=1 ) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({f"""{name}/recall""": recall, f"""{name}/precision""": precision, f"""{name}/f1""": fa} ) logger.info( name.ljust(10 ) , f"""Recall: {recall * 100:.2f}""" , f""" Precision: {precision * 100:.2f}""" , f""" F1: {fa * 100:.2f}""" , ) if conll_subparts_num == 3: snake_case_ = (conll / 3) * 100 logger.info(f"""CoNLL score: {conll:.2f}""" ) output_scores.update({"""conll_score""": conll} ) return output_scores def _a ( _SCREAMING_SNAKE_CASE ) -> str: snake_case_ = False for line in key_lines: if not line.startswith("""#""" ): if len(line.split() ) > 6: snake_case_ = line.split()[5] if not parse_col == "-": snake_case_ = True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class __A (datasets.Metric): '''simple docstring''' def lowerCAmelCase ( self : List[str] ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" ) ), """references""": datasets.Sequence(datasets.Value("""string""" ) ), } ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[ """https://github.com/ns-moosavi/coval""", """https://www.aclweb.org/anthology/P16-1060""", """http://www.conll.cemantix.org/2012/data.html""", ] , ) def lowerCAmelCase ( self : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int=True , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : int=False ): """simple docstring""" snake_case_ = [ ("""mentions""", evaluator.mentions), ("""muc""", evaluator.muc), ("""bcub""", evaluator.b_cubed), ("""ceafe""", evaluator.ceafe), ("""lea""", evaluator.lea), ] if min_span: snake_case_ = util.check_gold_parse_annotation(UpperCAmelCase_ ) if not has_gold_parse: raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" ) # util.parse_key_file(key_file) # key_file = key_file + ".parsed" snake_case_ = evaluate( key_lines=UpperCAmelCase_ , sys_lines=UpperCAmelCase_ , metrics=UpperCAmelCase_ , NP_only=UpperCAmelCase_ , remove_nested=UpperCAmelCase_ , keep_singletons=UpperCAmelCase_ , min_span=UpperCAmelCase_ , ) return score
367
"""simple docstring""" import copy import tempfile import unittest from transformers import MaMaaaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ) -> Optional[Any]: if attention_mask is None: snake_case_ = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: snake_case_ = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: snake_case_ = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=_SCREAMING_SNAKE_CASE ) if decoder_head_mask is None: snake_case_ = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=_SCREAMING_SNAKE_CASE ) if cross_attn_head_mask is None: snake_case_ = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=_SCREAMING_SNAKE_CASE ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } class __A : '''simple docstring''' def __init__( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : str=13 , UpperCAmelCase_ : Optional[int]=7 , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : int=99 , UpperCAmelCase_ : Union[str, Any]=16 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : int=4 , UpperCAmelCase_ : Optional[Any]="relu" , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : List[Any]=0.0 , UpperCAmelCase_ : List[str]=0.0 , UpperCAmelCase_ : List[str]=20 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : List[Any]=1 , UpperCAmelCase_ : Optional[Any]=0 , ) ->Dict: """simple docstring""" snake_case_ = parent snake_case_ = batch_size snake_case_ = seq_length snake_case_ = is_training snake_case_ = use_labels snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = encoder_layerdrop snake_case_ = decoder_layerdrop snake_case_ = max_position_embeddings snake_case_ = eos_token_id snake_case_ = pad_token_id snake_case_ = bos_token_id def lowerCAmelCase ( self : Tuple ) ->List[str]: """simple docstring""" snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ = self.eos_token_id # Eos Token snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for M2M100 the position_ids are prepared such that # all pad tokens have pos id = 2 and 
rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input snake_case_ = input_ids.clamp(self.pad_token_id + 1 ) snake_case_ = decoder_input_ids.clamp(self.pad_token_id + 1 ) snake_case_ = self.get_config() snake_case_ = prepare_mam_aaa_inputs_dict(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) return config, inputs_dict def lowerCAmelCase ( self : Optional[int] ) ->Union[str, Any]: """simple docstring""" return MaMaaaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , ) def lowerCAmelCase ( self : int ) ->Optional[int]: """simple docstring""" snake_case_ , snake_case_ = self.prepare_config_and_inputs() return config, inputs_dict def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict ) ->Dict: """simple docstring""" snake_case_ = MaMaaaModel(config=UpperCAmelCase_ ).get_decoder().to(UpperCAmelCase_ ).eval() snake_case_ = inputs_dict["""input_ids"""] snake_case_ = inputs_dict["""attention_mask"""] snake_case_ = inputs_dict["""head_mask"""] # first forward pass snake_case_ = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , head_mask=UpperCAmelCase_ , use_cache=UpperCAmelCase_ ) snake_case_ , snake_case_ = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids snake_case_ = ids_tensor((self.batch_size, 3) , config.vocab_size ) snake_case_ = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and snake_case_ = torch.cat([input_ids, next_tokens] , dim=-1 ) snake_case_ = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) snake_case_ = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )["""last_hidden_state"""] snake_case_ = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ )[ """last_hidden_state""" ] # select random slice snake_case_ = ids_tensor((1,) , output_from_past.shape[-1] ).item() snake_case_ = output_from_no_past[:, -3:, random_slice_idx].detach() snake_case_ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-2 ) ) def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Dict ) ->int: """simple docstring""" snake_case_ = MaMaaaModel(config=UpperCAmelCase_ ).to(UpperCAmelCase_ ).eval() snake_case_ = model(**UpperCAmelCase_ ) snake_case_ = outputs.encoder_last_hidden_state snake_case_ = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ = model.get_encoder() encoder.save_pretrained(UpperCAmelCase_ ) snake_case_ = 
MaMaaaEncoder.from_pretrained(UpperCAmelCase_ ).to(UpperCAmelCase_ ) snake_case_ = encoder(inputs_dict["""input_ids"""] , attention_mask=inputs_dict["""attention_mask"""] )[ 0 ] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 ) with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ = model.get_decoder() decoder.save_pretrained(UpperCAmelCase_ ) snake_case_ = MaMaaaDecoder.from_pretrained(UpperCAmelCase_ ).to(UpperCAmelCase_ ) snake_case_ = decoder( input_ids=inputs_dict["""decoder_input_ids"""] , attention_mask=inputs_dict["""decoder_attention_mask"""] , encoder_hidden_states=UpperCAmelCase_ , encoder_attention_mask=inputs_dict["""attention_mask"""] , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 ) @require_torch class __A (snake_case__ , snake_case__ , snake_case__ , unittest.TestCase): '''simple docstring''' __lowercase: Optional[Any] = ( ( MaMaaaModel, MaMaaaForConditionalGeneration, ) if is_torch_available() else () ) __lowercase: Union[str, Any] = (MaMaaaForConditionalGeneration,) if is_torch_available() else () __lowercase: Tuple = ( { """conversational""": MaMaaaForConditionalGeneration, """feature-extraction""": MaMaaaModel, """summarization""": MaMaaaForConditionalGeneration, """text2text-generation""": MaMaaaForConditionalGeneration, """translation""": MaMaaaForConditionalGeneration, } if is_torch_available() else {} ) __lowercase: Dict = True __lowercase: List[Any] = True __lowercase: Union[str, Any] = False __lowercase: Optional[int] = False def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int ) ->str: """simple docstring""" if pipeline_test_casse_name == "TranslationPipelineTests": # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`. # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer. 
return True return False def lowerCAmelCase ( self : int ) ->Dict: """simple docstring""" snake_case_ = MaMaaaModelTester(self ) snake_case_ = ConfigTester(self , config_class=UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[Any] ) ->Tuple: """simple docstring""" self.config_tester.run_common_tests() def lowerCAmelCase ( self : Union[str, Any] ) ->Optional[Any]: """simple docstring""" snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: snake_case_ = model_class(UpperCAmelCase_ ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCAmelCase_ ) snake_case_ , snake_case_ = model_class.from_pretrained(UpperCAmelCase_ , output_loading_info=UpperCAmelCase_ ) self.assertEqual(info["""missing_keys"""] , [] ) def lowerCAmelCase ( self : Optional[int] ) ->List[Any]: """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*UpperCAmelCase_ ) def lowerCAmelCase ( self : str ) ->List[str]: """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*UpperCAmelCase_ ) def lowerCAmelCase ( self : str ) ->List[str]: """simple docstring""" snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration): snake_case_ = model_class(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() snake_case_ = copy.deepcopy(self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) ) if not self.is_encoder_decoder: snake_case_ = inputs["""input_ids"""] del inputs["input_ids"] else: snake_case_ = inputs["""input_ids"""] snake_case_ = inputs.get("""decoder_input_ids""" , UpperCAmelCase_ ) del inputs["input_ids"] inputs.pop("""decoder_input_ids""" , UpperCAmelCase_ ) snake_case_ = model.get_input_embeddings() if not self.is_encoder_decoder: snake_case_ = wte(UpperCAmelCase_ ) else: snake_case_ = wte(UpperCAmelCase_ ) snake_case_ = wte(UpperCAmelCase_ ) with torch.no_grad(): model(**UpperCAmelCase_ )[0] def lowerCAmelCase ( self : Any ) ->Any: """simple docstring""" snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs() snake_case_ = input_dict["""input_ids"""] snake_case_ = input_ids.ne(1 ).to(UpperCAmelCase_ ) snake_case_ = MaMaaaForConditionalGeneration(UpperCAmelCase_ ).eval().to(UpperCAmelCase_ ) if torch_device == "cuda": model.half() model.generate(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ ) model.generate(num_beams=4 , do_sample=UpperCAmelCase_ , early_stopping=UpperCAmelCase_ , num_return_sequences=3 ) def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[int]: return torch.tensor(_SCREAMING_SNAKE_CASE , dtype=torch.long , device=_SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE : Tuple = 1E-4 @require_torch @require_sentencepiece @require_tokenizers @slow class __A (unittest.TestCase): '''simple docstring''' @cached_property def lowerCAmelCase ( self : Dict ) ->str: """simple docstring""" return MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" ) def lowerCAmelCase ( self : str ) ->Any: """simple docstring""" snake_case_ = MaMaaaModel.from_pretrained("""facebook/m2m100_418M""" ).to(UpperCAmelCase_ ) snake_case_ = _long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38, 2]] ) snake_case_ = _long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]] ) snake_case_ = 
prepare_mam_aaa_inputs_dict(model.config , UpperCAmelCase_ , UpperCAmelCase_ ) with torch.no_grad(): snake_case_ = model(**UpperCAmelCase_ )[0] snake_case_ = torch.Size((1, 11, 1_024) ) self.assertEqual(output.shape , UpperCAmelCase_ ) # change to expected output here snake_case_ = torch.tensor( [[-0.7_780, -0.1_676, 0.1_038], [-6.7_556, -1.3_992, 0.0_567], [-7.5_383, -0.5_920, -0.2_779]] , device=UpperCAmelCase_ ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_ ) ) def lowerCAmelCase ( self : Optional[int] ) ->Any: """simple docstring""" snake_case_ = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(UpperCAmelCase_ ) # change to intended input snake_case_ = _long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38, 2]] ) snake_case_ = _long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]] ) snake_case_ = prepare_mam_aaa_inputs_dict(model.config , UpperCAmelCase_ , UpperCAmelCase_ ) with torch.no_grad(): snake_case_ = model(**UpperCAmelCase_ )[0] snake_case_ = torch.Size((1, 11, model.config.vocab_size) ) self.assertEqual(output.shape , UpperCAmelCase_ ) # change to expected output here snake_case_ = torch.tensor( [[-1.0_448, -1.0_411, 3.7_992], [-3.2_191, -3.2_386, -1.3_451], [-3.6_210, -3.5_993, 0.4_925]] , device=UpperCAmelCase_ ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_ ) ) def lowerCAmelCase ( self : Dict ) ->str: """simple docstring""" snake_case_ = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(UpperCAmelCase_ ) snake_case_ = MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" , src_lang="""fr""" , tgt_lang="""en""" ) snake_case_ = [ """L'affaire NSA souligne l'absence totale de débat sur le renseignement""", """Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""", """Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent""" """ Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de""" """ l'ampleur de la surveillance américaine sur l'ensemble des communications en France.""", ] # The below article tests that we don't add any hypotheses outside of the top n_beams snake_case_ = tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_ , return_tensors="""pt""" ) snake_case_ = model.generate( input_ids=dct["""input_ids"""].to(UpperCAmelCase_ ) , attention_mask=dct["""attention_mask"""].to(UpperCAmelCase_ ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("""en""" ) , ) snake_case_ = [ """The NSA case highlights the total absence of intelligence debate""", """I think there are two levels of response from the French government.""", """When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.""" """ Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all""" """ communications in France.""", ] snake_case_ = tokenizer.batch_decode( hypotheses_batch.tolist() , clean_up_tokenization_spaces=UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) assert generated == expected_en
233
0
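# --- Illustrative aside (not a dataset row): the translation path the slow
# integration test above checks, sketched with the same facebook/m2m100_418M
# checkpoint the test names (downloads weights when run).
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")

batch = tokenizer(
    "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    return_tensors="pt",
)
generated = model.generate(**batch, num_beams=5, forced_bos_token_id=tokenizer.get_lang_id("en"))
print(tokenizer.batch_decode(generated, skip_special_tokens=True))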
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)


def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
27
'''simple docstring'''
#
# This is a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes        # name
# #SBATCH --nodes=2                    # nodes
# #SBATCH --ntasks-per-node=1          # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10           # number of cores per tasks
# #SBATCH --gres=gpu:4                 # number of gpus
# #SBATCH --time 0:05:00               # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out           # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
#  --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
#  --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
#  torch-distributed-gpu-test.py'
#

import fcntl
import os
import socket

import torch
import torch.distributed as dist


def printflock(*msgs):
    # solves the multi-process interleaved print problem by flocking this file
    with open(__file__, 'r') as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)


local_rank = int(os.environ['LOCAL_RANK'])
torch.cuda.set_device(local_rank)
device = torch.device('cuda', local_rank)
hostname = socket.gethostname()

gpu = f'''[{hostname}-{local_rank}]'''

try:
    # test distributed
    dist.init_process_group('nccl')
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(f'''{gpu} is OK (global rank: {rank}/{world_size})''')
    dist.barrier()

    if rank == 0:
        printflock(f'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''')

except Exception:
    printflock(f'''{gpu} is broken''')
    raise
27
1
"""simple docstring""" from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging _A = logging.get_logger(__name__) _A = { 't5-small': 'https://huggingface.co/t5-small/resolve/main/config.json', 't5-base': 'https://huggingface.co/t5-base/resolve/main/config.json', 't5-large': 'https://huggingface.co/t5-large/resolve/main/config.json', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/config.json', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/config.json', } class _lowercase ( __UpperCAmelCase ): lowercase_ = 't5' lowercase_ = ['past_key_values'] lowercase_ = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'} def __init__( self , UpperCAmelCase_=32128 , UpperCAmelCase_=512 , UpperCAmelCase_=64 , UpperCAmelCase_=2048 , UpperCAmelCase_=6 , UpperCAmelCase_=None , UpperCAmelCase_=8 , UpperCAmelCase_=32 , UpperCAmelCase_=128 , UpperCAmelCase_=0.1 , UpperCAmelCase_=1E-6 , UpperCAmelCase_=1.0 , UpperCAmelCase_="relu" , UpperCAmelCase_=True , UpperCAmelCase_=True , UpperCAmelCase_=0 , UpperCAmelCase_=1 , **UpperCAmelCase_ , ) -> Dict: lowerCamelCase : int = vocab_size lowerCamelCase : Dict = d_model lowerCamelCase : List[Any] = d_kv lowerCamelCase : int = d_ff lowerCamelCase : Dict = num_layers lowerCamelCase : Any = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry lowerCamelCase : Optional[int] = num_heads lowerCamelCase : List[Any] = relative_attention_num_buckets lowerCamelCase : Optional[Any] = relative_attention_max_distance lowerCamelCase : str = dropout_rate lowerCamelCase : Union[str, Any] = layer_norm_epsilon lowerCamelCase : Tuple = initializer_factor lowerCamelCase : Tuple = feed_forward_proj lowerCamelCase : str = use_cache lowerCamelCase : Optional[int] = self.feed_forward_proj.split('-' ) lowerCamelCase : Optional[Any] = act_info[-1] lowerCamelCase : str = act_info[0] == 'gated' if len(UpperCAmelCase_ ) > 1 and act_info[0] != "gated" or len(UpperCAmelCase_ ) > 2: raise ValueError( F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.""" 'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ' '\'gated-gelu\' or \'relu\'' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": lowerCamelCase : Any = 'gelu_new' super().__init__( pad_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , is_encoder_decoder=UpperCAmelCase_ , **UpperCAmelCase_ , ) class _lowercase ( __UpperCAmelCase ): @property def _UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]: lowerCamelCase : List[Any] = { 'input_ids': {0: 'batch', 1: 'encoder_sequence'}, 'attention_mask': {0: 'batch', 1: 'encoder_sequence'}, } if self.use_past: lowerCamelCase : Optional[Any] = 'past_encoder_sequence + sequence' lowerCamelCase : Optional[Any] = {0: 'batch'} lowerCamelCase : str = {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: lowerCamelCase : Dict = {0: 'batch', 1: 'decoder_sequence'} lowerCamelCase : List[Any] = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(UpperCAmelCase_ , direction='inputs' ) return common_inputs @property def _UpperCamelCase ( self ) -> int: return 13
358
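# --- Illustrative aside (not a dataset row): a hedged sketch of pairing the two
# classes from the code chunk above. The T5OnnxConfig import path is assumed to
# match where the chunk would live (transformers.models.t5.configuration_t5), and
# the config values are illustrative.
from transformers import T5Config
from transformers.models.t5.configuration_t5 import T5OnnxConfig

config = T5Config(d_model=512, num_layers=6, feed_forward_proj="gated-gelu")
onnx_config = T5OnnxConfig(config, task="seq2seq-lm")
print(onnx_config.inputs)  # dynamic-axis mapping from the `inputs` property above
print(onnx_config.default_onnx_opset)  # 13, per the final property above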
"""simple docstring""" def UpperCAmelCase ( a_ ): '''simple docstring''' lowerCamelCase : List[Any] = 1 for i in range(1, num + 1 ): fact *= i return fact def UpperCAmelCase ( a_ ): '''simple docstring''' lowerCamelCase : Optional[Any] = 0 while number > 0: lowerCamelCase : str = number % 10 sum_of_digits += last_digit lowerCamelCase : Tuple = number // 10 # Removing the last_digit from the given number return sum_of_digits def UpperCAmelCase ( a_ = 100 ): '''simple docstring''' lowerCamelCase : Optional[Any] = factorial(a_ ) lowerCamelCase : int = split_and_add(a_ ) return result if __name__ == "__main__": print(solution(int(input('Enter the Number: ').strip())))
205
0
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer __lowerCamelCase : Any = logging.get_logger(__name__) __lowerCamelCase : Optional[Any] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} __lowerCamelCase : Optional[Any] = { '''vocab_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''', '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json''' ), }, '''merges_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''', '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt''' ), }, '''tokenizer_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''', '''roberta-base-openai-detector''': ( '''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json''' ), '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json''' ), }, } __lowerCamelCase : Tuple = { '''roberta-base''': 5_12, '''roberta-large''': 5_12, '''roberta-large-mnli''': 5_12, '''distilroberta-base''': 5_12, '''roberta-base-openai-detector''': 5_12, '''roberta-large-openai-detector''': 5_12, } class a__ ( A__ ): A = VOCAB_FILES_NAMES A = PRETRAINED_VOCAB_FILES_MAP A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A = ['input_ids', 'attention_mask'] A = RobertaTokenizer def __init__( self : Optional[int],_A : str=None,_A : Any=None,_A : Tuple=None,_A : Optional[Any]="replace",_A : int="<s>",_A : int="</s>",_A : Tuple="</s>",_A : Optional[int]="<s>",_A : List[Any]="<unk>",_A : Optional[Any]="<pad>",_A : Dict="<mask>",_A : List[str]=False,_A : Optional[Any]=True,**_A : int,): """simple docstring""" super().__init__( _A,_A,tokenizer_file=_A,errors=_A,bos_token=_A,eos_token=_A,sep_token=_A,cls_token=_A,unk_token=_A,pad_token=_A,mask_token=_A,add_prefix_space=_A,trim_offsets=_A,**_A,) SCREAMING_SNAKE_CASE_ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if 
pre_tok_state.get("add_prefix_space",_A ) != add_prefix_space: SCREAMING_SNAKE_CASE_ : Union[str, Any] = getattr(_A,pre_tok_state.pop("type" ) ) SCREAMING_SNAKE_CASE_ : Optional[int] = add_prefix_space SCREAMING_SNAKE_CASE_ : Optional[int] = pre_tok_class(**_A ) SCREAMING_SNAKE_CASE_ : List[Any] = add_prefix_space SCREAMING_SNAKE_CASE_ : Optional[int] = "post_processor" SCREAMING_SNAKE_CASE_ : Optional[int] = getattr(self.backend_tokenizer,_A,_A ) if tokenizer_component_instance: SCREAMING_SNAKE_CASE_ : Dict = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: SCREAMING_SNAKE_CASE_ : Tuple = tuple(state["sep"] ) if "cls" in state: SCREAMING_SNAKE_CASE_ : Optional[Any] = tuple(state["cls"] ) SCREAMING_SNAKE_CASE_ : Optional[int] = False if state.get("add_prefix_space",_A ) != add_prefix_space: SCREAMING_SNAKE_CASE_ : List[Any] = add_prefix_space SCREAMING_SNAKE_CASE_ : int = True if state.get("trim_offsets",_A ) != trim_offsets: SCREAMING_SNAKE_CASE_ : List[Any] = trim_offsets SCREAMING_SNAKE_CASE_ : List[Any] = True if changes_to_apply: SCREAMING_SNAKE_CASE_ : int = getattr(_A,state.pop("type" ) ) SCREAMING_SNAKE_CASE_ : str = component_class(**_A ) setattr(self.backend_tokenizer,_A,_A ) @property def __UpperCamelCase ( self : Any ): """simple docstring""" if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def __UpperCamelCase ( self : Tuple,_A : Union[str, Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else value SCREAMING_SNAKE_CASE_ : Union[str, Any] = value def __UpperCamelCase ( self : Union[str, Any],*_A : int,**_A : str ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = kwargs.get("is_split_into_words",_A ) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*_A,**_A ) def __UpperCamelCase ( self : Optional[int],*_A : int,**_A : Union[str, Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = kwargs.get("is_split_into_words",_A ) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._encode_plus(*_A,**_A ) def __UpperCamelCase ( self : int,_A : str,_A : Optional[str] = None ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = self._tokenizer.model.save(_A,name=_A ) return tuple(_A ) def __UpperCamelCase ( self : Dict,_A : Optional[int],_A : List[Any]=None ): """simple docstring""" SCREAMING_SNAKE_CASE_ : str = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def __UpperCamelCase ( self : str,_A : List[int],_A : Optional[List[int]] = None ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = [self.sep_token_id] SCREAMING_SNAKE_CASE_ : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
18
"""simple docstring""" import argparse import copy def lowercase__ ( snake_case_ :Tuple ): __UpperCAmelCase = {} with open(snake_case_ ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: __UpperCAmelCase = [] _list.append([line.split()[1], line.split()[2]] ) __UpperCAmelCase = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: __UpperCAmelCase = [] _list.append([line.split()[0], line.split()[2]] ) __UpperCAmelCase = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def lowercase__ ( snake_case_ :Dict , snake_case_ :Optional[Any] ): with open(snake_case_ ) as f: __UpperCAmelCase = f.read(1 ) __UpperCAmelCase = start_node __UpperCAmelCase = [] __UpperCAmelCase = start_node __UpperCAmelCase = 0 while visiting not in first_solution: __UpperCAmelCase = 10_000 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(snake_case_ ) and k[0] not in first_solution: __UpperCAmelCase = k[1] __UpperCAmelCase = k[0] first_solution.append(snake_case_ ) __UpperCAmelCase = distance_of_first_solution + int(snake_case_ ) __UpperCAmelCase = best_node first_solution.append(snake_case_ ) __UpperCAmelCase = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 __UpperCAmelCase = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 10_000 ) return first_solution, distance_of_first_solution def lowercase__ ( snake_case_ :int , snake_case_ :Tuple ): __UpperCAmelCase = [] for n in solution[1:-1]: __UpperCAmelCase = solution.index(snake_case_ ) for kn in solution[1:-1]: __UpperCAmelCase = solution.index(snake_case_ ) if n == kn: continue __UpperCAmelCase = copy.deepcopy(snake_case_ ) __UpperCAmelCase = kn __UpperCAmelCase = n __UpperCAmelCase = 0 for k in _tmp[:-1]: __UpperCAmelCase = _tmp[_tmp.index(snake_case_ ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: __UpperCAmelCase = distance + int(i[1] ) _tmp.append(snake_case_ ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) __UpperCAmelCase = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda snake_case_ : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def lowercase__ ( snake_case_ :str , snake_case_ :Union[str, Any] , snake_case_ :Optional[int] , snake_case_ :Dict , snake_case_ :int ): __UpperCAmelCase = 1 __UpperCAmelCase = first_solution __UpperCAmelCase = [] __UpperCAmelCase = distance_of_first_solution __UpperCAmelCase = solution while count <= iters: __UpperCAmelCase = find_neighborhood(snake_case_ , snake_case_ ) __UpperCAmelCase = 0 __UpperCAmelCase = neighborhood[index_of_best_solution] __UpperCAmelCase = len(snake_case_ ) - 1 __UpperCAmelCase = False while not found: __UpperCAmelCase = 0 while i < len(snake_case_ ): if best_solution[i] != solution[i]: __UpperCAmelCase = best_solution[i] __UpperCAmelCase = solution[i] break __UpperCAmelCase = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) __UpperCAmelCase = True __UpperCAmelCase = best_solution[:-1] __UpperCAmelCase = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: __UpperCAmelCase = cost __UpperCAmelCase = solution else: __UpperCAmelCase = index_of_best_solution + 1 __UpperCAmelCase = 
neighborhood[index_of_best_solution] if len(snake_case_ ) >= size: tabu_list.pop(0 ) __UpperCAmelCase = count + 1 return best_solution_ever, best_cost def lowercase__ ( snake_case_ :str=None ): __UpperCAmelCase = generate_neighbours(args.File ) __UpperCAmelCase , __UpperCAmelCase = generate_first_solution( args.File , snake_case_ ) __UpperCAmelCase , __UpperCAmelCase = tabu_search( snake_case_ , snake_case_ , snake_case_ , args.Iterations , args.Size , ) print(F'''Best solution: {best_sol}, with total distance: {best_cost}.''' ) if __name__ == "__main__": _lowercase : List[str] = argparse.ArgumentParser(description='Tabu Search') parser.add_argument( '-f', '--File', type=str, help='Path to the file containing the data', required=True, ) parser.add_argument( '-i', '--Iterations', type=int, help='How many iterations the algorithm should perform', required=True, ) parser.add_argument( '-s', '--Size', type=int, help='Size of the tabu list', required=True ) # Pass the arguments to main method main(parser.parse_args())
332
0
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging __lowerCamelCase : Tuple = logging.get_logger(__name__) __lowerCamelCase : Union[str, Any] = '''▁''' __lowerCamelCase : str = {'''vocab_file''': '''sentencepiece.bpe.model'''} __lowerCamelCase : Optional[Any] = { '''vocab_file''': { '''facebook/nllb-200-distilled-600M''': ( '''https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model''' ), } } __lowerCamelCase : Any = { '''facebook/nllb-200-distilled-600M''': 1024, } # fmt: off __lowerCamelCase : Tuple = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', 
'''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn'''] class __snake_case ( lowerCamelCase_ ): lowerCAmelCase_ = VOCAB_FILES_NAMES lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase_ = ["input_ids", "attention_mask"] lowerCAmelCase_ = [] lowerCAmelCase_ = [] def __init__( self : List[str] , _lowercase : Union[str, Any] , _lowercase : Union[str, Any]="<s>" , _lowercase : Any="</s>" , _lowercase : Optional[int]="</s>" , _lowercase : Dict="<s>" , _lowercase : Tuple="<unk>" , _lowercase : Tuple="<pad>" , _lowercase : Optional[Any]="<mask>" , _lowercase : Any=None , _lowercase : Optional[int]=None , _lowercase : Union[str, Any]=None , _lowercase : Optional[Dict[str, Any]] = None , _lowercase : List[str]=None , _lowercase : int=False , **_lowercase : Optional[int] , ): """simple docstring""" SCREAMING_SNAKE_CASE__ = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else mask_token SCREAMING_SNAKE_CASE__ = {} if sp_model_kwargs is None else sp_model_kwargs SCREAMING_SNAKE_CASE__ = legacy_behaviour super().__init__( bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , cls_token=_lowercase , pad_token=_lowercase , mask_token=_lowercase , tokenizer_file=_lowercase , src_lang=_lowercase , tgt_lang=_lowercase , additional_special_tokens=_lowercase , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=_lowercase , **_lowercase , ) SCREAMING_SNAKE_CASE__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_lowercase ) ) SCREAMING_SNAKE_CASE__ = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' # spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s' # Mimic fairseq token-to-id alignment for the first 4 token SCREAMING_SNAKE_CASE__ = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab SCREAMING_SNAKE_CASE__ = 1 SCREAMING_SNAKE_CASE__ = len(self.sp_model ) SCREAMING_SNAKE_CASE__ = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_lowercase ) } SCREAMING_SNAKE_CASE__ = {v: k for k, v in self.lang_code_to_id.items()} SCREAMING_SNAKE_CASE__ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) SCREAMING_SNAKE_CASE__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()} SCREAMING_SNAKE_CASE__ = list(self.lang_code_to_id.keys() ) if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
self._additional_special_tokens.extend( [t for t in additional_special_tokens if t not in self._additional_special_tokens] ) SCREAMING_SNAKE_CASE__ = src_lang if src_lang is not None else """eng_Latn""" SCREAMING_SNAKE_CASE__ = self.lang_code_to_id[self._src_lang] SCREAMING_SNAKE_CASE__ = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self : Any ): """simple docstring""" SCREAMING_SNAKE_CASE__ = self.__dict__.copy() SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = self.sp_model.serialized_model_proto() return state def __setstate__( self : int , _lowercase : Union[str, Any] ): """simple docstring""" SCREAMING_SNAKE_CASE__ = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): SCREAMING_SNAKE_CASE__ = {} SCREAMING_SNAKE_CASE__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) @property def __a ( self : Optional[int] ): """simple docstring""" return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def __a ( self : Dict ): """simple docstring""" return self._src_lang @src_lang.setter def __a ( self : int , _lowercase : str ): """simple docstring""" SCREAMING_SNAKE_CASE__ = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __a ( self : Union[str, Any] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None , _lowercase : bool = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase ) SCREAMING_SNAKE_CASE__ = [1] * len(self.prefix_tokens ) SCREAMING_SNAKE_CASE__ = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(_lowercase )) + suffix_ones return prefix_ones + ([0] * len(_lowercase )) + ([0] * len(_lowercase )) + suffix_ones def __a ( self : List[str] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ): """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def __a ( self : List[str] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ): """simple docstring""" SCREAMING_SNAKE_CASE__ = [self.sep_token_id] SCREAMING_SNAKE_CASE__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __a ( self : Dict , _lowercase : List[Any] , _lowercase : str , _lowercase : Optional[str] , _lowercase : Optional[str] , **_lowercase : int ): """simple docstring""" if src_lang is None or tgt_lang is None: raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" ) SCREAMING_SNAKE_CASE__ = src_lang SCREAMING_SNAKE_CASE__ = self(_lowercase , add_special_tokens=_lowercase , return_tensors=_lowercase , **_lowercase ) SCREAMING_SNAKE_CASE__ = self.convert_tokens_to_ids(_lowercase ) SCREAMING_SNAKE_CASE__ = tgt_lang_id return inputs def __a ( self : str ): """simple docstring""" SCREAMING_SNAKE_CASE__ = {self.convert_ids_to_tokens(_lowercase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __a ( self : Any , _lowercase : str ): """simple docstring""" return self.sp_model.encode(_lowercase , out_type=_lowercase ) def 
__a ( self : Tuple , _lowercase : Any ): """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] SCREAMING_SNAKE_CASE__ = self.sp_model.PieceToId(_lowercase ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def __a ( self : str , _lowercase : int ): """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def __a ( self : List[str] , _lowercase : Tuple ): """simple docstring""" SCREAMING_SNAKE_CASE__ = """""".join(_lowercase ).replace(_lowercase , """ """ ).strip() return out_string def __a ( self : List[Any] , _lowercase : str , _lowercase : Optional[str] = None ): """simple docstring""" if not os.path.isdir(_lowercase ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return SCREAMING_SNAKE_CASE__ = os.path.join( _lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _lowercase ) elif not os.path.isfile(self.vocab_file ): with open(_lowercase , """wb""" ) as fi: SCREAMING_SNAKE_CASE__ = self.sp_model.serialized_model_proto() fi.write(_lowercase ) return (out_vocab_file,) def __a ( self : Any , _lowercase : List[str] , _lowercase : str = "eng_Latn" , _lowercase : Optional[List[str]] = None , _lowercase : str = "fra_Latn" , **_lowercase : Any , ): """simple docstring""" SCREAMING_SNAKE_CASE__ = src_lang SCREAMING_SNAKE_CASE__ = tgt_lang return super().prepare_seqaseq_batch(_lowercase , _lowercase , **_lowercase ) def __a ( self : Any ): """simple docstring""" return self.set_src_lang_special_tokens(self.src_lang ) def __a ( self : Dict ): """simple docstring""" return self.set_tgt_lang_special_tokens(self.tgt_lang ) def __a ( self : Optional[Any] , _lowercase : List[str] ): """simple docstring""" SCREAMING_SNAKE_CASE__ = self.lang_code_to_id[src_lang] if self.legacy_behaviour: SCREAMING_SNAKE_CASE__ = [] SCREAMING_SNAKE_CASE__ = [self.eos_token_id, self.cur_lang_code] else: SCREAMING_SNAKE_CASE__ = [self.cur_lang_code] SCREAMING_SNAKE_CASE__ = [self.eos_token_id] def __a ( self : int , _lowercase : str ): """simple docstring""" SCREAMING_SNAKE_CASE__ = self.lang_code_to_id[lang] if self.legacy_behaviour: SCREAMING_SNAKE_CASE__ = [] SCREAMING_SNAKE_CASE__ = [self.eos_token_id, self.cur_lang_code] else: SCREAMING_SNAKE_CASE__ = [self.cur_lang_code] SCREAMING_SNAKE_CASE__ = [self.eos_token_id]
359
import argparse import io import requests import torch from omegaconf import OmegaConf from diffusers import AutoencoderKL from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, conv_attn_to_linear, create_vae_diffusers_config, renew_vae_attention_paths, renew_vae_resnet_paths, ) def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int] , __UpperCamelCase : str ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ = checkpoint SCREAMING_SNAKE_CASE__ = {} SCREAMING_SNAKE_CASE__ = vae_state_dict["""encoder.conv_in.weight"""] SCREAMING_SNAKE_CASE__ = vae_state_dict["""encoder.conv_in.bias"""] SCREAMING_SNAKE_CASE__ = vae_state_dict["""encoder.conv_out.weight"""] SCREAMING_SNAKE_CASE__ = vae_state_dict["""encoder.conv_out.bias"""] SCREAMING_SNAKE_CASE__ = vae_state_dict["""encoder.norm_out.weight"""] SCREAMING_SNAKE_CASE__ = vae_state_dict["""encoder.norm_out.bias"""] SCREAMING_SNAKE_CASE__ = vae_state_dict["""decoder.conv_in.weight"""] SCREAMING_SNAKE_CASE__ = vae_state_dict["""decoder.conv_in.bias"""] SCREAMING_SNAKE_CASE__ = vae_state_dict["""decoder.conv_out.weight"""] SCREAMING_SNAKE_CASE__ = vae_state_dict["""decoder.conv_out.bias"""] SCREAMING_SNAKE_CASE__ = vae_state_dict["""decoder.norm_out.weight"""] SCREAMING_SNAKE_CASE__ = vae_state_dict["""decoder.norm_out.bias"""] SCREAMING_SNAKE_CASE__ = vae_state_dict["""quant_conv.weight"""] SCREAMING_SNAKE_CASE__ = vae_state_dict["""quant_conv.bias"""] SCREAMING_SNAKE_CASE__ = vae_state_dict["""post_quant_conv.weight"""] SCREAMING_SNAKE_CASE__ = vae_state_dict["""post_quant_conv.bias"""] # Retrieves the keys for the encoder down blocks only SCREAMING_SNAKE_CASE__ = len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """encoder.down""" in layer} ) SCREAMING_SNAKE_CASE__ = { layer_id: [key for key in vae_state_dict if f"""down.{layer_id}""" in key] for layer_id in range(__UpperCamelCase ) } # Retrieves the keys for the decoder up blocks only SCREAMING_SNAKE_CASE__ = len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """decoder.up""" in layer} ) SCREAMING_SNAKE_CASE__ = { layer_id: [key for key in vae_state_dict if f"""up.{layer_id}""" in key] for layer_id in range(__UpperCamelCase ) } for i in range(__UpperCamelCase ): SCREAMING_SNAKE_CASE__ = [key for key in down_blocks[i] if f"""down.{i}""" in key and f"""down.{i}.downsample""" not in key] if f"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict: SCREAMING_SNAKE_CASE__ = vae_state_dict.pop( f"""encoder.down.{i}.downsample.conv.weight""" ) SCREAMING_SNAKE_CASE__ = vae_state_dict.pop( f"""encoder.down.{i}.downsample.conv.bias""" ) SCREAMING_SNAKE_CASE__ = renew_vae_resnet_paths(__UpperCamelCase ) SCREAMING_SNAKE_CASE__ = {"""old""": f"""down.{i}.block""", """new""": f"""down_blocks.{i}.resnets"""} assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase ) SCREAMING_SNAKE_CASE__ = [key for key in vae_state_dict if """encoder.mid.block""" in key] SCREAMING_SNAKE_CASE__ = 2 for i in range(1 , num_mid_res_blocks + 1 ): SCREAMING_SNAKE_CASE__ = [key for key in mid_resnets if f"""encoder.mid.block_{i}""" in key] SCREAMING_SNAKE_CASE__ = renew_vae_resnet_paths(__UpperCamelCase ) SCREAMING_SNAKE_CASE__ = {"""old""": f"""mid.block_{i}""", """new""": f"""mid_block.resnets.{i - 1}"""} assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , 
config=__UpperCamelCase ) SCREAMING_SNAKE_CASE__ = [key for key in vae_state_dict if """encoder.mid.attn""" in key] SCREAMING_SNAKE_CASE__ = renew_vae_attention_paths(__UpperCamelCase ) SCREAMING_SNAKE_CASE__ = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""} assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase ) conv_attn_to_linear(__UpperCamelCase ) for i in range(__UpperCamelCase ): SCREAMING_SNAKE_CASE__ = num_up_blocks - 1 - i SCREAMING_SNAKE_CASE__ = [ key for key in up_blocks[block_id] if f"""up.{block_id}""" in key and f"""up.{block_id}.upsample""" not in key ] if f"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict: SCREAMING_SNAKE_CASE__ = vae_state_dict[ f"""decoder.up.{block_id}.upsample.conv.weight""" ] SCREAMING_SNAKE_CASE__ = vae_state_dict[ f"""decoder.up.{block_id}.upsample.conv.bias""" ] SCREAMING_SNAKE_CASE__ = renew_vae_resnet_paths(__UpperCamelCase ) SCREAMING_SNAKE_CASE__ = {"""old""": f"""up.{block_id}.block""", """new""": f"""up_blocks.{i}.resnets"""} assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase ) SCREAMING_SNAKE_CASE__ = [key for key in vae_state_dict if """decoder.mid.block""" in key] SCREAMING_SNAKE_CASE__ = 2 for i in range(1 , num_mid_res_blocks + 1 ): SCREAMING_SNAKE_CASE__ = [key for key in mid_resnets if f"""decoder.mid.block_{i}""" in key] SCREAMING_SNAKE_CASE__ = renew_vae_resnet_paths(__UpperCamelCase ) SCREAMING_SNAKE_CASE__ = {"""old""": f"""mid.block_{i}""", """new""": f"""mid_block.resnets.{i - 1}"""} assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase ) SCREAMING_SNAKE_CASE__ = [key for key in vae_state_dict if """decoder.mid.attn""" in key] SCREAMING_SNAKE_CASE__ = renew_vae_attention_paths(__UpperCamelCase ) SCREAMING_SNAKE_CASE__ = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""} assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase ) conv_attn_to_linear(__UpperCamelCase ) return new_checkpoint def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : str , __UpperCamelCase : str , ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = requests.get( """ https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml""" ) SCREAMING_SNAKE_CASE__ = io.BytesIO(r.content ) SCREAMING_SNAKE_CASE__ = OmegaConf.load(__UpperCamelCase ) SCREAMING_SNAKE_CASE__ = 5_12 SCREAMING_SNAKE_CASE__ = """cuda""" if torch.cuda.is_available() else """cpu""" if checkpoint_path.endswith("""safetensors""" ): from safetensors import safe_open SCREAMING_SNAKE_CASE__ = {} with safe_open(__UpperCamelCase , framework="""pt""" , device="""cpu""" ) as f: for key in f.keys(): SCREAMING_SNAKE_CASE__ = f.get_tensor(__UpperCamelCase ) else: SCREAMING_SNAKE_CASE__ = torch.load(__UpperCamelCase , map_location=__UpperCamelCase )["""state_dict"""] # Convert the VAE model. 
SCREAMING_SNAKE_CASE__ = create_vae_diffusers_config(__UpperCamelCase , image_size=__UpperCamelCase ) SCREAMING_SNAKE_CASE__ = custom_convert_ldm_vae_checkpoint(__UpperCamelCase , __UpperCamelCase ) SCREAMING_SNAKE_CASE__ = AutoencoderKL(**__UpperCamelCase ) vae.load_state_dict(__UpperCamelCase ) vae.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": __lowerCamelCase : List[Any] = argparse.ArgumentParser() parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''') parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''') __lowerCamelCase : Optional[int] = parser.parse_args() vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
204
0
"""simple docstring""" import os import re import shutil from argparse import ArgumentParser, Namespace from datasets.commands import BaseDatasetsCLICommand from datasets.utils.logging import get_logger lowerCAmelCase__ : Dict = '<<<<<<< This should probably be modified because it mentions: ' lowerCAmelCase__ : int = '=======\n>>>>>>>\n' lowerCAmelCase__ : Optional[int] = [ 'TextEncoderConfig', 'ByteTextEncoder', 'SubwordTextEncoder', 'encoder_config', 'maybe_build_from_corpus', 'manual_dir', ] lowerCAmelCase__ : Dict = [ # (pattern, replacement) # Order is important here for some replacements (r'tfds\.core', r'datasets'), (r'tf\.io\.gfile\.GFile', r'open'), (r'tf\.([\w\d]+)', r'datasets.Value(\'\1\')'), (r'tfds\.features\.Text\(\)', r'datasets.Value(\'string\')'), (r'tfds\.features\.Text\(', r'datasets.Value(\'string\'),'), (r'features\s*=\s*tfds.features.FeaturesDict\(', r'features=datasets.Features('), (r'tfds\.features\.FeaturesDict\(', r'dict('), (r'The TensorFlow Datasets Authors', r'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'), (r'tfds\.', r'datasets.'), (r'dl_manager\.manual_dir', r'self.config.data_dir'), (r'self\.builder_config', r'self.config'), ] def a_ ( lowerCamelCase ): return ConvertCommand(args.tfds_path , args.datasets_directory ) class snake_case ( __UpperCAmelCase ): """simple docstring""" @staticmethod def __lowerCAmelCase ( lowerCamelCase__ : ArgumentParser ): UpperCAmelCase__ = parser.add_parser( 'convert' ,help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.' ,) train_parser.add_argument( '--tfds_path' ,type=lowerCamelCase__ ,required=lowerCamelCase__ ,help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.' ,) train_parser.add_argument( '--datasets_directory' ,type=lowerCamelCase__ ,required=lowerCamelCase__ ,help='Path to the HuggingFace Datasets folder.' ) train_parser.set_defaults(func=lowerCamelCase__ ) def __init__( self : List[Any] ,lowerCamelCase__ : str ,lowerCamelCase__ : str ,*lowerCamelCase__ : Union[str, Any] ): UpperCAmelCase__ = get_logger('datasets-cli/converting' ) UpperCAmelCase__ = tfds_path UpperCAmelCase__ = datasets_directory def __lowerCAmelCase ( self : List[Any] ): if os.path.isdir(self._tfds_path ): UpperCAmelCase__ = os.path.abspath(self._tfds_path ) elif os.path.isfile(self._tfds_path ): UpperCAmelCase__ = os.path.dirname(self._tfds_path ) else: raise ValueError('--tfds_path is neither a directory nor a file. Please check path.' 
) UpperCAmelCase__ = os.path.abspath(self._datasets_directory ) self._logger.info(f'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' ) UpperCAmelCase__ = [] UpperCAmelCase__ = [] UpperCAmelCase__ = {} if os.path.isdir(self._tfds_path ): UpperCAmelCase__ = os.listdir(lowerCamelCase__ ) else: UpperCAmelCase__ = [os.path.basename(self._tfds_path )] for f_name in file_names: self._logger.info(f'''Looking at file {f_name}''' ) UpperCAmelCase__ = os.path.join(lowerCamelCase__ ,lowerCamelCase__ ) UpperCAmelCase__ = os.path.join(lowerCamelCase__ ,lowerCamelCase__ ) if not os.path.isfile(lowerCamelCase__ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name: self._logger.info('Skipping file' ) continue with open(lowerCamelCase__ ,encoding='utf-8' ) as f: UpperCAmelCase__ = f.readlines() UpperCAmelCase__ = [] UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = [] for line in lines: UpperCAmelCase__ = line # Convert imports if "import tensorflow.compat.v2 as tf" in out_line: continue elif "@tfds.core" in out_line: continue elif "builder=self" in out_line: continue elif "import tensorflow_datasets.public_api as tfds" in out_line: UpperCAmelCase__ = 'import datasets\n' elif "import tensorflow" in out_line: # order is important here UpperCAmelCase__ = '' continue elif "from absl import logging" in out_line: UpperCAmelCase__ = 'from datasets import logging\n' elif "getLogger" in out_line: UpperCAmelCase__ = out_line.replace('getLogger' ,'get_logger' ) elif any(expression in out_line for expression in TO_HIGHLIGHT ): UpperCAmelCase__ = True UpperCAmelCase__ = list(filter(lambda lowerCamelCase__ : e in out_line ,lowerCamelCase__ ) ) out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowerCamelCase__ ) + '\n' ) out_lines.append(lowerCamelCase__ ) out_lines.append(lowerCamelCase__ ) continue else: for pattern, replacement in TO_CONVERT: UpperCAmelCase__ = re.sub(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ) # Take care of saving utilities (to later move them together with main script) if "tensorflow_datasets" in out_line: UpperCAmelCase__ = re.match(R'from\stensorflow_datasets.*import\s([^\.\r\n]+)' ,lowerCamelCase__ ) tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(',' ) ) UpperCAmelCase__ = 'from . import ' + match.group(1 ) # Check we have not forget anything if "tf." in out_line or "tfds." 
in out_line or "tensorflow_datasets" in out_line: raise ValueError(f'''Error converting {out_line.strip()}''' ) if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line: UpperCAmelCase__ = True out_lines.append(lowerCamelCase__ ) if is_builder or "wmt" in f_name: # We create a new directory for each dataset UpperCAmelCase__ = f_name.replace('.py' ,'' ) UpperCAmelCase__ = os.path.join(lowerCamelCase__ ,lowerCamelCase__ ) UpperCAmelCase__ = os.path.join(lowerCamelCase__ ,lowerCamelCase__ ) os.makedirs(lowerCamelCase__ ,exist_ok=lowerCamelCase__ ) self._logger.info(f'''Adding directory {output_dir}''' ) imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} ) else: # Utilities will be moved at the end utils_files.append(lowerCamelCase__ ) if needs_manual_update: with_manual_update.append(lowerCamelCase__ ) with open(lowerCamelCase__ ,'w' ,encoding='utf-8' ) as f: f.writelines(lowerCamelCase__ ) self._logger.info(f'''Converted in {output_file}''' ) for utils_file in utils_files: try: UpperCAmelCase__ = os.path.basename(lowerCamelCase__ ) UpperCAmelCase__ = imports_to_builder_map[f_name.replace('.py' ,'' )] self._logger.info(f'''Moving {dest_folder} to {utils_file}''' ) shutil.copy(lowerCamelCase__ ,lowerCamelCase__ ) except KeyError: self._logger.error(f'''Cannot find destination folder for {utils_file}. Please copy manually.''' ) if with_manual_update: for file_path in with_manual_update: self._logger.warning( f'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
98
import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class snake_case__ : def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=10 , lowerCAmelCase__=3 , lowerCAmelCase__=2 , lowerCAmelCase__=2 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=32 , lowerCAmelCase__=5 , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=10 , lowerCAmelCase__=0.0_2 , lowerCAmelCase__="divided_space_time" , lowerCAmelCase__=None , ) -> List[str]: __magic_name__ : int = parent __magic_name__ : Tuple = batch_size __magic_name__ : int = image_size __magic_name__ : str = num_channels __magic_name__ : Dict = patch_size __magic_name__ : Tuple = num_frames __magic_name__ : List[Any] = is_training __magic_name__ : List[Any] = use_labels __magic_name__ : Dict = hidden_size __magic_name__ : List[Any] = num_hidden_layers __magic_name__ : str = num_attention_heads __magic_name__ : List[Any] = intermediate_size __magic_name__ : Dict = hidden_act __magic_name__ : List[Any] = hidden_dropout_prob __magic_name__ : Union[str, Any] = attention_probs_dropout_prob __magic_name__ : Tuple = attention_type __magic_name__ : List[str] = initializer_range __magic_name__ : Optional[Any] = scope __magic_name__ : Tuple = num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token __magic_name__ : str = (image_size // patch_size) ** 2 __magic_name__ : Any = (num_frames) * self.num_patches_per_frame + 1 def __magic_name__ ( self ) -> Dict: __magic_name__ : Optional[Any] = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) __magic_name__ : str = None if self.use_labels: __magic_name__ : List[Any] = ids_tensor([self.batch_size] , self.num_labels ) __magic_name__ : Optional[Any] = self.get_config() return config, pixel_values, labels def __magic_name__ ( self ) -> str: __magic_name__ : Dict = TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , ) __magic_name__ : Optional[Any] = self.num_labels return config def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> 
Optional[Any]: __magic_name__ : List[Any] = TimesformerModel(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() __magic_name__ : Optional[Any] = model(lowerCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any: __magic_name__ : int = TimesformerForVideoClassification(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() __magic_name__ : List[Any] = model(lowerCAmelCase__ ) # verify the logits shape __magic_name__ : List[Any] = torch.Size((self.batch_size, self.num_labels) ) self.parent.assertEqual(result.logits.shape , lowerCAmelCase__ ) def __magic_name__ ( self ) -> Any: __magic_name__ : Union[str, Any] = self.prepare_config_and_inputs() __magic_name__ ,__magic_name__ ,__magic_name__ : Tuple = config_and_inputs __magic_name__ : Optional[int] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class snake_case__ ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): lowercase__ : Tuple = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () lowercase__ : Union[str, Any] = ( {'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification} if is_torch_available() else {} ) lowercase__ : int = False lowercase__ : str = False lowercase__ : Tuple = False lowercase__ : Any = False def __magic_name__ ( self ) -> List[Any]: __magic_name__ : List[Any] = TimesformerModelTester(self ) __magic_name__ : List[str] = ConfigTester( self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=37 ) def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ) -> List[str]: __magic_name__ : List[str] = copy.deepcopy(lowerCAmelCase__ ) if return_labels: if model_class in get_values(lowerCAmelCase__ ): __magic_name__ : Union[str, Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ ) return inputs_dict def __magic_name__ ( self ) -> List[str]: self.config_tester.run_common_tests() @unittest.skip(reason="""TimeSformer does not use inputs_embeds""" ) def __magic_name__ ( self ) -> str: pass def __magic_name__ ( self ) -> Optional[int]: __magic_name__ ,__magic_name__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __magic_name__ : List[Any] = model_class(lowerCAmelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __magic_name__ : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCAmelCase__ , nn.Linear ) ) def __magic_name__ ( self ) -> Optional[Any]: __magic_name__ ,__magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __magic_name__ : Dict = model_class(lowerCAmelCase__ ) __magic_name__ : Optional[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __magic_name__ : Optional[int] = [*signature.parameters.keys()] __magic_name__ : Any = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowerCAmelCase__ ) def __magic_name__ ( self ) -> List[Any]: __magic_name__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase__ ) def __magic_name__ ( self ) -> Union[str, Any]: __magic_name__ : Dict = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*lowerCAmelCase__ ) @slow def __magic_name__ ( self ) -> Optional[int]: for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __magic_name__ : List[str] = TimesformerModel.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) def __magic_name__ ( self ) -> List[Any]: if not self.has_attentions: pass else: __magic_name__ ,__magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ : Optional[int] = True for model_class in self.all_model_classes: __magic_name__ : Tuple = self.model_tester.seq_length __magic_name__ : int = self.model_tester.num_frames __magic_name__ : Any = True __magic_name__ : Tuple = False __magic_name__ : Optional[int] = True __magic_name__ : str = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() with torch.no_grad(): __magic_name__ : List[str] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) __magic_name__ : List[str] = outputs.attentions self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __magic_name__ : Optional[Any] = True __magic_name__ : Optional[Any] = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() with torch.no_grad(): __magic_name__ : Optional[int] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) __magic_name__ : int = outputs.attentions self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) __magic_name__ : Union[str, Any] = len(lowerCAmelCase__ ) # Check attention is always last and order is fine __magic_name__ : str = True __magic_name__ : Optional[Any] = True __magic_name__ : int = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() with torch.no_grad(): __magic_name__ : List[str] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) self.assertEqual(out_len + 1 , len(lowerCAmelCase__ ) ) __magic_name__ : Union[str, Any] = outputs.attentions self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) def __magic_name__ ( self ) -> Any: def check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): __magic_name__ : Union[str, Any] = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() with torch.no_grad(): __magic_name__ : int = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) __magic_name__ : Optional[Any] = outputs.hidden_states __magic_name__ : str = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ ) __magic_name__ : str = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) __magic_name__ ,__magic_name__ : int = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __magic_name__ : Optional[Any] = True check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __magic_name__ : Union[str, Any] = True check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) def UpperCamelCase ( ): """simple docstring""" __magic_name__ : List[Any] = hf_hub_download( repo_id="""hf-internal-testing/spaghetti-video""", filename="""eating_spaghetti.npy""", repo_type="""dataset""" ) __magic_name__ : List[str] = np.load(_A ) return list(_A ) @require_torch @require_vision class snake_case__ ( unittest.TestCase ): @cached_property def __magic_name__ ( self ) -> Optional[Any]: # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def __magic_name__ ( self ) -> List[Any]: __magic_name__ : Dict = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to( lowerCAmelCase__ ) __magic_name__ : str = self.default_image_processor __magic_name__ : Any = prepare_video() __magic_name__ : Dict = image_processor(video[:8] , return_tensors="""pt""" ).to(lowerCAmelCase__ ) # forward pass with torch.no_grad(): __magic_name__ : int = model(**lowerCAmelCase__ ) # verify the logits __magic_name__ : Optional[int] = torch.Size((1, 4_00) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase__ ) __magic_name__ : Union[str, Any] = torch.tensor([-0.3_0_1_6, -0.7_7_1_3, -0.4_2_0_5] ).to(lowerCAmelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1e-4 ) )
342
0
"""simple docstring""" # This script creates a super tiny model that is useful inside tests, when we just want to test that # the machinery works, without needing to the check the quality of the outcomes. # # This version creates a tiny model through reduction of a normal pre-trained model, but keeping the # full vocab, merges file, and thus also resulting in a larger model due to a large vocab size. # This gives ~3MB in total for all files. # # If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated # # # It will be used then as "stas/tiny-wmt19-en-de" # Build from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration a = 'facebook/wmt19-en-de' a = FSMTTokenizer.from_pretrained(mname) # get the correct vocab sizes, etc. from the master model a = FSMTConfig.from_pretrained(mname) config.update( dict( d_model=4, encoder_layers=1, decoder_layers=1, encoder_ffn_dim=4, decoder_ffn_dim=4, encoder_attention_heads=1, decoder_attention_heads=1, ) ) a = FSMTForConditionalGeneration(config) print(F'''num of params {tiny_model.num_parameters()}''') # Test a = tokenizer(['''Making tiny model'''], return_tensors='''pt''') a = tiny_model(**batch) print('''test output:''', len(outputs.logits[0])) # Save a = 'tiny-wmt19-en-de' tiny_model.half() # makes it smaller tiny_model.save_pretrained(mname_tiny) tokenizer.save_pretrained(mname_tiny) print(F'''Generated {mname_tiny}''') # Upload # transformers-cli upload tiny-wmt19-en-de
356
"""simple docstring""" from __future__ import annotations import time import numpy as np a = [8, 5, 9, 7] a = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] a = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class lowercase_ : '''simple docstring''' def __init__( self : str , _UpperCAmelCase : list[int] , _UpperCAmelCase : list[list[int]] , _UpperCAmelCase : list[list[int]] , ): _A = claim_vector _A = allocated_resources_table _A = maximum_claim_table def lowerCAmelCase_ ( self : Tuple ): return [ sum(p_item[i] for p_item in self.__allocated_resources_table ) for i in range(len(self.__allocated_resources_table[0] ) ) ] def lowerCAmelCase_ ( self : Tuple ): return np.array(self.__claim_vector ) - np.array( self.__processes_resource_summation() ) def lowerCAmelCase_ ( self : List[Any] ): return [ list(np.array(self.__maximum_claim_table[i] ) - np.array(_UpperCAmelCase ) ) for i, allocated_resource in enumerate(self.__allocated_resources_table ) ] def lowerCAmelCase_ ( self : List[Any] ): return {self.__need().index(_UpperCAmelCase ): i for i in self.__need()} def lowerCAmelCase_ ( self : List[str] , **_UpperCAmelCase : int ): _A = self.__need() _A = self.__allocated_resources_table _A = self.__available_resources() _A = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print('_' * 50 + '\n' ) while need_list: _A = False for each_need in need_list: _A = True for index, need in enumerate(_UpperCAmelCase ): if need > available_resources[index]: _A = False break if execution: _A = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: _A = original_need_index print(F'''Process {process_number + 1} is executing.''' ) # remove the process run from stack need_list.remove(_UpperCAmelCase ) # update available/freed resources stack _A = np.array(_UpperCAmelCase ) + np.array( alloc_resources_table[process_number] ) print( 'Updated available resource stack for processes: ' + ' '.join([str(_UpperCAmelCase ) for x in available_resources] ) ) break if safe: print('The process is in a safe state.\n' ) else: print('System in unsafe state. Aborting...\n' ) break def lowerCAmelCase_ ( self : Union[str, Any] ): print(' ' * 9 + 'Allocated Resource Table' ) for item in self.__allocated_resources_table: print( F'''P{self.__allocated_resources_table.index(_UpperCAmelCase ) + 1}''' + ' '.join(F'''{it:>8}''' for it in item ) + '\n' ) print(' ' * 9 + 'System Resource Table' ) for item in self.__maximum_claim_table: print( F'''P{self.__maximum_claim_table.index(_UpperCAmelCase ) + 1}''' + ' '.join(F'''{it:>8}''' for it in item ) + '\n' ) print( 'Current Usage by Active Processes: ' + ' '.join(str(_UpperCAmelCase ) for x in self.__claim_vector ) ) print( 'Initial Available Resources: ' + ' '.join(str(_UpperCAmelCase ) for x in self.__available_resources() ) ) time.sleep(1 ) if __name__ == "__main__": import doctest doctest.testmod()
271
0
import argparse

import torch

from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
    parser.add_argument(
        "--original_config_file",
        default=None,
        type=str,
        help="The YAML config file corresponding to the original architecture.",
    )
    parser.add_argument(
        "--num_in_channels",
        default=None,
        type=int,
        help="The number of input channels. If `None` number of input channels will be automatically inferred.",
    )
    parser.add_argument(
        "--scheduler_type",
        default="pndm",
        type=str,
        help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
    )
    parser.add_argument(
        "--pipeline_type",
        default=None,
        type=str,
        help=(
            "The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"
            ". If `None` pipeline will be automatically inferred."
        ),
    )
    parser.add_argument(
        "--image_size",
        default=None,
        type=int,
        help=(
            "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2"
            " Base. Use 768 for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--prediction_type",
        default=None,
        type=str,
        help=(
            "The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"
            " Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--extract_ema",
        action="store_true",
        help=(
            "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
            " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
            " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
        ),
    )
    parser.add_argument(
        "--upcast_attention",
        action="store_true",
        help=(
            "Whether the attention computation should always be upcasted. This is necessary when running stable"
            " diffusion 2.1."
        ),
    )
    parser.add_argument(
        "--from_safetensors",
        action="store_true",
        help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
    )
    parser.add_argument(
        "--to_safetensors",
        action="store_true",
        help="Whether to store pipeline in safetensors format or not.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
    parser.add_argument(
        "--stable_unclip",
        type=str,
        default=None,
        required=False,
        help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.",
    )
    parser.add_argument(
        "--stable_unclip_prior",
        type=str,
        default=None,
        required=False,
        help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
    )
    parser.add_argument(
        "--clip_stats_path",
        type=str,
        help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
        required=False,
    )
    parser.add_argument(
        "--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
    )
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--vae_path",
        type=str,
        default=None,
        required=False,
        help="Set to a path, hub id to an already converted vae to not convert it again.",
    )
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        prediction_type=args.prediction_type,
        model_type=args.pipeline_type,
        extract_ema=args.extract_ema,
        scheduler_type=args.scheduler_type,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        stable_unclip=args.stable_unclip,
        stable_unclip_prior=args.stable_unclip_prior,
        clip_stats_path=args.clip_stats_path,
        controlnet=args.controlnet,
        vae_path=args.vae_path,
    )

    if args.half:
        pipe.to(torch_dtype=torch.float16)

    if args.controlnet:
        # only save the controlnet model
        pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
    else:
        pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
257
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
233
0
import sys

from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
    pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
    pkgs_to_check_at_runtime.append("importlib_metadata")

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
42
import subprocess import sys from transformers import BertConfig, BertModel, BertTokenizer, pipeline from transformers.testing_utils import TestCasePlus, require_torch class lowerCamelCase__ ( _a ): @require_torch def _lowerCamelCase ( self : Union[str, Any] ): # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before # `transformers` is loaded, and it's too late for inside pytest - so we are changing it # while running an external program # python one-liner segments # this must be loaded before socket.socket is monkey-patched a__: Dict ="\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n " a__: Union[str, Any] ="\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n " a__: Dict ="\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n " # Force fetching the files so that we can use the cache a__: Tuple ="hf-internal-testing/tiny-random-bert" BertConfig.from_pretrained(_a ) BertModel.from_pretrained(_a ) BertTokenizer.from_pretrained(_a ) pipeline(task="fill-mask" , model=_a ) # baseline - just load from_pretrained with normal network a__: Optional[int] =[sys.executable, "-c", "\n".join([load, run, mock] )] # should succeed a__: str =self.get_env() # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files a__: Union[str, Any] ="1" a__: Any =subprocess.run(_a , env=_a , check=_a , capture_output=_a ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() ) @require_torch def _lowerCamelCase ( self : str ): # python one-liner segments # this must be loaded before socket.socket is monkey-patched a__: Optional[int] ="\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n " a__: Tuple ="\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n " a__: List[Any] ="\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n " # Force fetching the files so that we can use the cache a__: str ="hf-internal-testing/tiny-random-bert" BertConfig.from_pretrained(_a ) BertModel.from_pretrained(_a ) BertTokenizer.from_pretrained(_a ) pipeline(task="fill-mask" , model=_a ) # baseline - just load from_pretrained with normal network a__: Any =[sys.executable, "-c", "\n".join([load, run, mock] )] # should succeed a__: Optional[Any] =self.get_env() a__: Union[str, Any] =subprocess.run(_a , env=_a , check=_a , capture_output=_a ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() ) @require_torch def _lowerCamelCase ( self : List[str] ): # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before # `transformers` is loaded, and it's too late for inside pytest - so we are changing it # while running an external program # python one-liner segments # this must be loaded before socket.socket is monkey-patched a__: Tuple ="\nfrom transformers import BertConfig, BertModel, BertTokenizer\n " a__: str ="\nmname = 
\"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n " a__: int ="\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n " # baseline - just load from_pretrained with normal network a__: Union[str, Any] =[sys.executable, "-c", "\n".join([load, run] )] # should succeed a__: Optional[Any] =self.get_env() a__: Optional[Any] =subprocess.run(_a , env=_a , check=_a , capture_output=_a ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() ) # next emulate no network a__: int =[sys.executable, "-c", "\n".join([load, mock, run] )] # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this. # env["TRANSFORMERS_OFFLINE"] = "0" # result = subprocess.run(cmd, env=env, check=False, capture_output=True) # self.assertEqual(result.returncode, 1, result.stderr) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files a__: Tuple ="1" a__: Dict =subprocess.run(_a , env=_a , check=_a , capture_output=_a ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() ) @require_torch def _lowerCamelCase ( self : Optional[Any] ): a__: Dict ="\nfrom transformers import pipeline\n " a__: Union[str, Any] ="\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n " a__: List[str] ="\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n " a__: Dict =self.get_env() a__: Optional[Any] ="1" a__: Dict =[sys.executable, "-c", "\n".join([load, mock, run] )] a__: Optional[int] =subprocess.run(_a , env=_a , check=_a , capture_output=_a ) self.assertEqual(result.returncode , 1 , result.stderr ) self.assertIn( "You cannot infer task automatically within `pipeline` when using offline mode" , result.stderr.decode().replace("\n" , "" ) , ) @require_torch def _lowerCamelCase ( self : Optional[int] ): a__: Optional[Any] ="\nfrom transformers import AutoModel\n " a__: str ="\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n " # baseline - just load from_pretrained with normal network a__: Tuple =[sys.executable, "-c", "\n".join([load, run] )] # should succeed a__: Any =self.get_env() a__: int =subprocess.run(_a , env=_a , check=_a , capture_output=_a ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() ) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files a__: List[Any] ="1" a__: int =subprocess.run(_a , env=_a , check=_a , capture_output=_a ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("success" , result.stdout.decode() )
42
1
import warnings

from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor


logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
329
import copy
from typing import Any, Dict, List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def _np_extract_fbank_features(self, waveform) -> np.ndarray:
        # Compute the log-mel spectrogram and rescale it to roughly [-1, 1].
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
205
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wavlm"] = [
        "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WavLMForAudioFrameClassification",
        "WavLMForCTC",
        "WavLMForSequenceClassification",
        "WavLMForXVector",
        "WavLMModel",
        "WavLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavlm import (
            WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavLMForAudioFrameClassification,
            WavLMForCTC,
            WavLMForSequenceClassification,
            WavLMForXVector,
            WavLMModel,
            WavLMPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
15
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os

from accelerate.test_utils import execute_subprocess_async


def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
15
1
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available


if is_datasets_available():
    import datasets


class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)
            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
59
import warnings

from ...utils import logging
from .image_processing_donut import DonutImageProcessor


logger = logging.get_logger(__name__)


class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
204
0
import json
import os
from pathlib import Path

import pytest

from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename


URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"


class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    return MockResponse()


@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
    import requests

    monkeypatch.setattr(requests, "request", mock_request)
    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}


@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_download_manager_extract(paths_type, xz_file, text_file):
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content


def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4


@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2


@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2


def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
177
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_conditional_detr": [
        "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ConditionalDetrConfig",
        "ConditionalDetrOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_conditional_detr"] = [
        "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConditionalDetrForObjectDetection",
        "ConditionalDetrForSegmentation",
        "ConditionalDetrModel",
        "ConditionalDetrPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_conditional_detr import (
        CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ConditionalDetrConfig,
        ConditionalDetrOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
        from .image_processing_conditional_detr import ConditionalDetrImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_conditional_detr import (
            CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConditionalDetrForObjectDetection,
            ConditionalDetrForSegmentation,
            ConditionalDetrModel,
            ConditionalDetrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
177
1
import unittest from transformers import BigBirdTokenizer, BigBirdTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase = '''▁''' UpperCamelCase = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece @require_tokenizers class snake_case_ ( __A ,unittest.TestCase ): __A : Optional[int] = BigBirdTokenizer __A : Any = BigBirdTokenizerFast __A : Dict = True __A : Optional[Any] = True def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]: super().setUp() lowercase__ : Union[str, Any] = self.tokenizer_class(lowercase_ , keep_accents=lowercase_ ) tokenizer.save_pretrained(self.tmpdirname ) def __UpperCamelCase ( self : Tuple ) -> List[Any]: lowercase__ : List[Any] = "<s>" lowercase__ : Optional[Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ) , lowercase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ) , lowercase_ ) def __UpperCamelCase ( self : Optional[Any] ) -> int: lowercase__ : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<unk>" ) self.assertEqual(vocab_keys[1] , "<s>" ) self.assertEqual(vocab_keys[-1] , "[MASK]" ) self.assertEqual(len(lowercase_ ) , 10_04 ) def __UpperCamelCase ( self : List[Any] ) -> str: self.assertEqual(self.get_tokenizer().vocab_size , 10_00 ) def __UpperCamelCase ( self : Optional[int] ) -> Tuple: if not self.test_rust_tokenizer: return lowercase__ : Dict = self.get_tokenizer() lowercase__ : Any = self.get_rust_tokenizer() lowercase__ : str = "I was born in 92000, and this is falsé." lowercase__ : Optional[int] = tokenizer.tokenize(lowercase_ ) lowercase__ : Any = rust_tokenizer.tokenize(lowercase_ ) self.assertListEqual(lowercase_ , lowercase_ ) lowercase__ : List[str] = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ ) lowercase__ : List[str] = rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ ) self.assertListEqual(lowercase_ , lowercase_ ) lowercase__ : str = self.get_rust_tokenizer() lowercase__ : Union[str, Any] = tokenizer.encode(lowercase_ ) lowercase__ : Optional[Any] = rust_tokenizer.encode(lowercase_ ) self.assertListEqual(lowercase_ , lowercase_ ) def __UpperCamelCase ( self : Dict ) -> str: lowercase__ : Optional[Any] = BigBirdTokenizer(lowercase_ , keep_accents=lowercase_ ) lowercase__ : List[str] = tokenizer.tokenize("This is a test" ) self.assertListEqual(lowercase_ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowercase_ ) , [2_85, 46, 10, 1_70, 3_82] , ) lowercase__ : Dict = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) self.assertListEqual( lowercase_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) lowercase__ : Tuple = tokenizer.convert_tokens_to_ids(lowercase_ ) self.assertListEqual( lowercase_ , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) lowercase__ : Tuple = tokenizer.convert_ids_to_tokens(lowercase_ ) self.assertListEqual( lowercase_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) @cached_property def __UpperCamelCase ( self : List[Any] ) -> Optional[int]: return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" ) @slow def __UpperCamelCase ( self : Optional[int] ) -> int: lowercase__ : Optional[Any] = "Hello World!" lowercase__ : Optional[int] = [65, 1_85_36, 22_60, 1_01, 66] self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) ) @slow def __UpperCamelCase ( self : Tuple ) -> Optional[Any]: lowercase__ : Tuple = ( "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will" " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth" ) # fmt: off lowercase__ : Optional[Any] = [65, 8_71, 4_19, 3_58, 9_46, 9_91, 25_21, 4_52, 3_58, 13_57, 3_87, 77_51, 35_36, 1_12, 9_85, 4_56, 1_26, 8_65, 9_38, 54_00, 57_34, 4_58, 13_68, 4_67, 7_86, 24_62, 52_46, 11_59, 6_33, 8_65, 45_19, 4_57, 5_82, 8_52, 25_57, 4_27, 9_16, 5_08, 4_05, 3_43_24, 4_97, 3_91, 4_08, 1_13_42, 12_44, 3_85, 1_00, 9_38, 9_85, 4_56, 5_74, 3_62, 1_25_97, 32_00, 31_29, 11_72, 66] # noqa: E231 # fmt: on self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) ) @require_torch @slow def __UpperCamelCase ( self : List[Any] ) -> int: import torch from transformers import BigBirdConfig, BigBirdModel # Build sequence lowercase__ : Optional[int] = list(self.big_tokenizer.get_vocab().keys() )[:10] lowercase__ : str = " ".join(lowercase_ ) lowercase__ : List[str] = self.big_tokenizer.encode_plus(lowercase_ , return_tensors="pt" , return_token_type_ids=lowercase_ ) lowercase__ : int = self.big_tokenizer.batch_encode_plus( [sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=lowercase_ ) lowercase__ : Optional[int] = BigBirdConfig(attention_type="original_full" ) lowercase__ : List[str] = BigBirdModel(lowercase_ ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**lowercase_ ) model(**lowercase_ ) @slow def __UpperCamelCase ( self : str ) -> Optional[int]: lowercase__ : List[Any] = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" ) lowercase__ : str = tokenizer.decode(tokenizer("Paris is the [MASK]." 
).input_ids ) self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]" ) @slow def __UpperCamelCase ( self : Optional[int] ) -> Optional[Any]: # fmt: off lowercase__ : str = {"input_ids": [[65, 3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14, 66], [65, 4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowercase_ , model_name="google/bigbird-roberta-base" , revision="215c99f1600e06f83acce68422f2035b2b5c3510" , )
87
"""A stack implemented on top of a singly linked list."""
from __future__ import annotations

from collections.abc import Iterator
from typing import Generic, TypeVar

T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data  # value stored in the node
        self.next: Node[T] | None = None  # reference to the next node

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None


if __name__ == "__main__":
    from doctest import testmod

    testmod()
271
0
"""Count hybrid integers p^q * q^p (p, q prime) up to 800800^800800 (Project Euler 800)."""
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    # Sieve of Eratosthenes: all primes below max_number.
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800_800, degree: int = 800_800) -> int:
    # Work in log2 space: p^q * q^p <= base^degree iff
    # q*log2(p) + p*log2(q) <= degree*log2(base), then count pairs with a two-pointer scan.
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count


if __name__ == "__main__":
    print(f"{solution() = }")
110
import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class lowercase : def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=False , A_=True , A_=99 , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ) -> Tuple: """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = seq_length UpperCamelCase = is_training UpperCamelCase = use_input_mask UpperCamelCase = use_token_type_ids UpperCamelCase = use_labels UpperCamelCase = vocab_size UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = max_position_embeddings UpperCamelCase = type_vocab_size UpperCamelCase = type_sequence_label_size UpperCamelCase = initializer_range UpperCamelCase = num_labels UpperCamelCase = num_choices UpperCamelCase = scope def __UpperCamelCase ( self ) -> Any: """simple docstring""" UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase = None if self.use_input_mask: UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase = None if self.use_token_type_ids: UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase = None UpperCamelCase = None UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices ) UpperCamelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" return LlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A_ , initializer_range=self.initializer_range , ) def __UpperCamelCase ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> int: """simple docstring""" UpperCamelCase = LlamaModel(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase = model(A_ , attention_mask=A_ ) UpperCamelCase = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCamelCase ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> List[Any]: """simple docstring""" UpperCamelCase = True UpperCamelCase = LlamaModel(A_ ) model.to(A_ ) model.eval() 
UpperCamelCase = model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , ) UpperCamelCase = model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , ) UpperCamelCase = model(A_ , attention_mask=A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCamelCase ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> str: """simple docstring""" UpperCamelCase = LlamaForCausalLM(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase = model(A_ , attention_mask=A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCamelCase ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]: """simple docstring""" UpperCamelCase = True UpperCamelCase = True UpperCamelCase = LlamaForCausalLM(config=A_ ) model.to(A_ ) model.eval() # first forward pass UpperCamelCase = model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , use_cache=A_ , ) UpperCamelCase = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCamelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and UpperCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCamelCase = torch.cat([input_mask, next_mask] , dim=-1 ) UpperCamelCase = model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , output_hidden_states=A_ , )['hidden_states'][0] UpperCamelCase = model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , past_key_values=A_ , output_hidden_states=A_ , )['hidden_states'][0] # select random slice UpperCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCamelCase = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(A_ , A_ , atol=1e-3 ) ) def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) = config_and_inputs UpperCamelCase = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ): __lowercase : str = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () __lowercase : str = (LlamaForCausalLM,) if is_torch_available() else () __lowercase : Any = ( { "feature-extraction": LlamaModel, "text-classification": LlamaForSequenceClassification, "text-generation": LlamaForCausalLM, "zero-shot": LlamaForSequenceClassification, } if is_torch_available() else {} ) __lowercase : int = False __lowercase : Optional[int] = False def __UpperCamelCase ( self ) -> int: """simple docstring""" UpperCamelCase = LlamaModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=A_ , hidden_size=37 ) def __UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" self.config_tester.run_common_tests() def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" 
UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCamelCase = type self.model_tester.create_and_check_model(*A_ ) def __UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = 3 UpperCamelCase = input_dict['input_ids'] UpperCamelCase = input_ids.ne(1 ).to(A_ ) UpperCamelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) UpperCamelCase = LlamaForSequenceClassification(A_ ) model.to(A_ ) model.eval() UpperCamelCase = model(A_ , attention_mask=A_ , labels=A_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __UpperCamelCase ( self ) -> Dict: """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = 3 UpperCamelCase = 'single_label_classification' UpperCamelCase = input_dict['input_ids'] UpperCamelCase = input_ids.ne(1 ).to(A_ ) UpperCamelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) UpperCamelCase = LlamaForSequenceClassification(A_ ) model.to(A_ ) model.eval() UpperCamelCase = model(A_ , attention_mask=A_ , labels=A_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __UpperCamelCase ( self ) -> List[str]: """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = 3 UpperCamelCase = 'multi_label_classification' UpperCamelCase = input_dict['input_ids'] UpperCamelCase = input_ids.ne(1 ).to(A_ ) UpperCamelCase = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) UpperCamelCase = LlamaForSequenceClassification(A_ ) model.to(A_ ) model.eval() UpperCamelCase = model(A_ , attention_mask=A_ , labels=A_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('LLaMA buffers include complex numbers, which breaks this test' ) def __UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" pass @parameterized.expand([('linear',), ('dynamic',)] ) def __UpperCamelCase ( self , A_ ) -> Optional[int]: """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = ids_tensor([1, 10] , config.vocab_size ) UpperCamelCase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights UpperCamelCase = LlamaModel(A_ ) original_model.to(A_ ) original_model.eval() UpperCamelCase = original_model(A_ ).last_hidden_state UpperCamelCase = original_model(A_ ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights UpperCamelCase = {'type': scaling_type, 'factor': 10.0} UpperCamelCase = LlamaModel(A_ ) scaled_model.to(A_ ) scaled_model.eval() UpperCamelCase = scaled_model(A_ ).last_hidden_state UpperCamelCase = scaled_model(A_ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than 
the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(A_ , A_ , atol=1e-5 ) ) else: self.assertFalse(torch.allclose(A_ , A_ , atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(A_ , A_ , atol=1e-5 ) ) @require_torch class lowercase ( unittest.TestCase ): @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' ) @slow def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" UpperCamelCase = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] UpperCamelCase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' ) UpperCamelCase = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 UpperCamelCase = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] ) torch.testing.assert_close(out.mean(-1 ) , A_ , atol=1e-2 , rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off UpperCamelCase = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , A_ , atol=1e-5 , rtol=1e-5 ) @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' ) @slow def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" UpperCamelCase = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] UpperCamelCase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' ) UpperCamelCase = model(torch.tensor(A_ ) ) # Expected mean on dim = -1 UpperCamelCase = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] ) torch.testing.assert_close(out.mean(-1 ) , A_ , atol=1e-2 , rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off UpperCamelCase = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , A_ , atol=1e-5 , rtol=1e-5 ) @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' ) @slow def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" UpperCamelCase = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] UpperCamelCase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' ) UpperCamelCase = model(torch.tensor(A_ ) ) # Expected mean on dim = -1 UpperCamelCase = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] ) torch.testing.assert_close(out.mean(-1 ) , A_ , atol=1e-2 , rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off UpperCamelCase = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) , A_ , atol=1e-2 , rtol=1e-2 ) @unittest.skip( 'Logits are not exactly the same, once we fix the instabalities somehow, will update! 
Also it is gonna be a `too_slow` test' ) @slow def __UpperCamelCase ( self ) -> List[str]: """simple docstring""" UpperCamelCase = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] UpperCamelCase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' ) UpperCamelCase = model(torch.tensor(A_ ) ) UpperCamelCase = torch.tensor( [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) , A_ , atol=1e-2 , rtol=1e-2 ) # fmt: off UpperCamelCase = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , A_ , atol=1e-5 , rtol=1e-5 ) @unittest.skip('Model is curently gated' ) @slow def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" UpperCamelCase = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi' UpperCamelCase = 'Simply put, the theory of relativity states that ' UpperCamelCase = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ) UpperCamelCase = tokenizer.encode(A_ , return_tensors='pt' ) UpperCamelCase = LlamaForCausalLM.from_pretrained( 'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=A_ ) # greedy generation outputs UpperCamelCase = model.generate(A_ , max_new_tokens=64 , top_p=A_ , temperature=1 , do_sample=A_ ) UpperCamelCase = tokenizer.decode(generated_ids[0] , skip_special_tokens=A_ ) self.assertEqual(A_ , A_ )
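# Editor's note on the RoPE-scaling test above: `{"type": scaling_type, "factor": 10.0}`
# is the `rope_scaling` config dict, and the test asserts that "dynamic" scaling leaves
# short-input activations untouched while "linear" scaling changes them. A minimal,
# hypothetical config sketch (values are illustrative, not taken from the test):
from transformers import LlamaConfig

config = LlamaConfig(rope_scaling={"type": "linear", "factor": 10.0})
print(config.rope_scaling)  # {'type': 'linear', 'factor': 10.0}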
110
1
from math import factorial


def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Probability of exactly `successes` successes in `trials` Bernoulli trials."""
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in the range 0 - 1")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print("Probability of 2 successes out of 4 trials")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
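# Editor's cross-check of the demo call above (not part of the original file):
# C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375.
from math import comb

assert abs(comb(4, 2) * 0.75**2 * 0.25**2 - 0.2109375) < 1e-12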
42
def solution(n: int = 100) -> int:
    """Difference between the square of the sum and the sum of the squares of 1..n."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(f"{solution() = }")
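# Editor's worked example for the function above (not part of the original file):
# for n = 10, the square of the sum is 55**2 = 3025 and the sum of the squares is
# 385, so the difference is 2640; for the default n = 100 it is 25164150.
assert solution(10) == 2640
assert solution(100) == 25164150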
42
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
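# Editor's sketch of the lazy-import pattern used by the module above (my own
# minimal analogue, assuming the semantics of transformers' `_LazyModule`):
# attribute access triggers the real submodule import, so importing the package
# stays cheap until a symbol such as `YolosModel` is actually touched.
import importlib
import types


class TinyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        # Import the owning submodule on first access, then delegate.
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")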
366
__A : List[Any] = [ 9_99, 8_00, 7_99, 6_00, 5_99, 5_00, 4_00, 3_99, 3_77, 3_55, 3_33, 3_11, 2_88, 2_66, 2_44, 2_22, 2_00, 1_99, 1_77, 1_55, 1_33, 1_11, 88, 66, 44, 22, 0, ] __A : int = [ 9_99, 9_76, 9_52, 9_28, 9_05, 8_82, 8_58, 8_57, 8_10, 7_62, 7_15, 7_14, 5_72, 4_29, 4_28, 2_86, 2_85, 2_38, 1_90, 1_43, 1_42, 1_18, 95, 71, 47, 24, 0, ] __A : Any = [ 9_99, 9_88, 9_77, 9_66, 9_55, 9_44, 9_33, 9_22, 9_11, 9_00, 8_99, 8_79, 8_59, 8_40, 8_20, 8_00, 7_99, 7_66, 7_33, 7_00, 6_99, 6_50, 6_00, 5_99, 5_00, 4_99, 4_00, 3_99, 3_50, 3_00, 2_99, 2_66, 2_33, 2_00, 1_99, 1_79, 1_59, 1_40, 1_20, 1_00, 99, 88, 77, 66, 55, 44, 33, 22, 11, 0, ] __A : Dict = [ 9_99, 9_95, 9_92, 9_89, 9_85, 9_81, 9_78, 9_75, 9_71, 9_67, 9_64, 9_61, 9_57, 9_56, 9_51, 9_47, 9_42, 9_37, 9_33, 9_28, 9_23, 9_19, 9_14, 9_13, 9_08, 9_03, 8_97, 8_92, 8_87, 8_81, 8_76, 8_71, 8_70, 8_64, 8_58, 8_52, 8_46, 8_40, 8_34, 8_28, 8_27, 8_20, 8_13, 8_06, 7_99, 7_92, 7_85, 7_84, 7_77, 7_70, 7_63, 7_56, 7_49, 7_42, 7_41, 7_33, 7_24, 7_16, 7_07, 6_99, 6_98, 6_88, 6_77, 6_66, 6_56, 6_55, 6_45, 6_34, 6_23, 6_13, 6_12, 5_98, 5_84, 5_70, 5_69, 5_55, 5_41, 5_27, 5_26, 5_05, 4_84, 4_83, 4_62, 4_40, 4_39, 3_96, 3_95, 3_52, 3_51, 3_08, 3_07, 2_64, 2_63, 2_20, 2_19, 1_76, 1_32, 88, 44, 0, ] __A : List[str] = [ 9_99, 9_97, 9_95, 9_92, 9_90, 9_88, 9_86, 9_84, 9_81, 9_79, 9_77, 9_75, 9_72, 9_70, 9_68, 9_66, 9_64, 9_61, 9_59, 9_57, 9_56, 9_54, 9_51, 9_49, 9_46, 9_44, 9_41, 9_39, 9_36, 9_34, 9_31, 9_29, 9_26, 9_24, 9_21, 9_19, 9_16, 9_14, 9_13, 9_10, 9_07, 9_05, 9_02, 8_99, 8_96, 8_93, 8_91, 8_88, 8_85, 8_82, 8_79, 8_77, 8_74, 8_71, 8_70, 8_67, 8_64, 8_61, 8_58, 8_55, 8_52, 8_49, 8_46, 8_43, 8_40, 8_37, 8_34, 8_31, 8_28, 8_27, 8_24, 8_21, 8_17, 8_14, 8_11, 8_08, 8_04, 8_01, 7_98, 7_95, 7_91, 7_88, 7_85, 7_84, 7_80, 7_77, 7_74, 7_70, 7_66, 7_63, 7_60, 7_56, 7_52, 7_49, 7_46, 7_42, 7_41, 7_37, 7_33, 7_30, 7_26, 7_22, 7_18, 7_14, 7_10, 7_07, 7_03, 6_99, 6_98, 6_94, 6_90, 6_85, 6_81, 6_77, 6_73, 6_69, 6_64, 6_60, 6_56, 6_55, 6_50, 6_46, 6_41, 6_36, 6_32, 6_27, 6_22, 6_18, 6_13, 6_12, 6_07, 6_02, 5_96, 5_91, 5_86, 5_80, 5_75, 5_70, 5_69, 5_63, 5_57, 5_51, 5_45, 5_39, 5_33, 5_27, 5_26, 5_19, 5_12, 5_05, 4_98, 4_91, 4_84, 4_83, 4_74, 4_66, 4_57, 4_49, 4_40, 4_39, 4_28, 4_18, 4_07, 3_96, 3_95, 3_81, 3_66, 3_52, 3_51, 3_30, 3_08, 3_07, 2_86, 2_64, 2_63, 2_42, 2_20, 2_19, 1_76, 1_75, 1_32, 1_31, 88, 44, 0, ] __A : List[str] = [ 9_99, 9_91, 9_82, 9_74, 9_66, 9_58, 9_50, 9_41, 9_33, 9_25, 9_16, 9_08, 9_00, 8_99, 8_74, 8_50, 8_25, 8_00, 7_99, 7_00, 6_00, 5_00, 4_00, 3_00, 2_00, 1_00, 0, ] __A : Dict = [ 9_99, 9_92, 9_85, 9_78, 9_71, 9_64, 9_57, 9_49, 9_42, 9_35, 9_28, 9_21, 9_14, 9_07, 9_00, 8_99, 8_79, 8_59, 8_40, 8_20, 8_00, 7_99, 7_66, 7_33, 7_00, 6_99, 6_50, 6_00, 5_99, 5_00, 4_99, 4_00, 3_99, 3_00, 2_99, 2_00, 1_99, 1_00, 99, 0, ] __A : str = [ 9_99, 9_96, 9_92, 9_89, 9_85, 9_82, 9_79, 9_75, 9_72, 9_68, 9_65, 9_61, 9_58, 9_55, 9_51, 9_48, 9_44, 9_41, 9_38, 9_34, 9_31, 9_27, 9_24, 9_20, 9_17, 9_14, 9_10, 9_07, 9_03, 9_00, 8_99, 8_91, 8_84, 8_76, 8_69, 8_61, 8_53, 8_46, 8_38, 8_30, 8_23, 8_15, 8_08, 8_00, 7_99, 7_88, 7_77, 7_66, 7_55, 7_44, 7_33, 7_22, 7_11, 7_00, 6_99, 6_88, 6_77, 6_66, 6_55, 6_44, 6_33, 6_22, 6_11, 6_00, 5_99, 5_85, 5_71, 5_57, 5_42, 5_28, 5_14, 5_00, 4_99, 4_85, 4_71, 4_57, 4_42, 4_28, 4_14, 4_00, 3_99, 3_79, 3_59, 3_40, 3_20, 3_00, 2_99, 2_79, 2_59, 2_40, 2_20, 2_00, 1_99, 1_66, 1_33, 1_00, 99, 66, 33, 0, ]
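# These constants read as hand-tuned denoising timestep schedules (27/50/100/...
# steps counted down over a 1000-step training range). For comparison, an evenly
# spaced 27-step schedule (editor's sketch, not from the original file):
import numpy as np

uniform_27 = np.linspace(999, 0, 27).round().astype(int).tolist()
print(uniform_27)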
49
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wavlm"] = [
        "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WavLMForAudioFrameClassification",
        "WavLMForCTC",
        "WavLMForSequenceClassification",
        "WavLMForXVector",
        "WavLMModel",
        "WavLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavlm import (
            WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavLMForAudioFrameClassification,
            WavLMForCTC,
            WavLMForSequenceClassification,
            WavLMForXVector,
            WavLMModel,
            WavLMPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
15
import argparse
import os
import shutil

import torch

from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer


def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                # Third argument (apply sigmoid to the scores) restored from the
                # upstream movement-pruning script; it was lost in this dump.
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pruning_method",
        choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
        type=str,
        required=True,
        help=(
            "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
            " sigmoied_threshold = Soft movement pruning)"
        ),
    )
    parser.add_argument(
        "--threshold",
        type=float,
        required=False,
        help=(
            "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
            "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
            "Not needed for `l0`"
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        required=True,
        help="Folder containing the model that was previously fine-pruned",
    )
    parser.add_argument(
        "--target_model_path",
        default=None,
        type=str,
        required=False,
        help="Folder where the pruned model is saved (defaults to `bertarized_<model>` next to the input)",
    )
    args = parser.parse_args()
    main(args)
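# Editor's sketch of the "magnitude" branch above, assuming MagnitudeBinarizer
# keeps the largest `threshold` fraction of weights by absolute value (this is
# my own re-implementation for illustration, not the emmental one):
import torch


def magnitude_mask(tensor: torch.Tensor, keep_fraction: float) -> torch.Tensor:
    """Binary mask keeping the top `keep_fraction` of weights by magnitude."""
    flat = tensor.abs().flatten()
    k = max(1, int(keep_fraction * flat.numel()))
    cutoff = torch.topk(flat, k).values.min()
    return (tensor.abs() >= cutoff).to(tensor.dtype)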
15
1
import argparse from collections import defaultdict import yaml lowerCAmelCase = 'docs/source/en/_toctree.yml' def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = defaultdict(SCREAMING_SNAKE_CASE ) lowercase__ = [] lowercase__ = [] for doc in doc_list: if "local" in doc: counts[doc["local"]] += 1 if doc["title"].lower() == "overview": overview_doc.append({'''local''': doc['''local'''], '''title''': doc['''title''']} ) else: new_doc_list.append(SCREAMING_SNAKE_CASE ) lowercase__ = new_doc_list lowercase__ = [key for key, value in counts.items() if value > 1] lowercase__ = [] for duplicate_key in duplicates: lowercase__ = list({doc['''title'''] for doc in doc_list if doc['''local'''] == duplicate_key} ) if len(SCREAMING_SNAKE_CASE ) > 1: raise ValueError( f'{duplicate_key} is present several times in the documentation table of content at ' '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the ''' '''others.''' ) # Only add this once new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in doc_list if '''local''' not in counts or counts[doc['''local''']] == 1] ) lowercase__ = sorted(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : s["title"].lower() ) # "overview" gets special treatment and is always first if len(SCREAMING_SNAKE_CASE ) > 1: raise ValueError('''{doc_list} has two \'overview\' docs which is not allowed.''' ) overview_doc.extend(SCREAMING_SNAKE_CASE ) # Sort return overview_doc def _a ( SCREAMING_SNAKE_CASE=False ): """simple docstring""" with open(SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as f: lowercase__ = yaml.safe_load(f.read() ) # Get to the API doc lowercase__ = 0 while content[api_idx]["title"] != "API": api_idx += 1 lowercase__ = content[api_idx]['''sections'''] # Then to the model doc lowercase__ = 0 while api_doc[scheduler_idx]["title"] != "Schedulers": scheduler_idx += 1 lowercase__ = api_doc[scheduler_idx]['''sections'''] lowercase__ = clean_doc_toc(SCREAMING_SNAKE_CASE ) lowercase__ = False if new_scheduler_doc != scheduler_doc: lowercase__ = True if overwrite: lowercase__ = new_scheduler_doc if diff: if overwrite: lowercase__ = api_doc with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as f: f.write(yaml.dump(SCREAMING_SNAKE_CASE , allow_unicode=SCREAMING_SNAKE_CASE ) ) else: raise ValueError( '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' ) def _a ( SCREAMING_SNAKE_CASE=False ): """simple docstring""" with open(SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as f: lowercase__ = yaml.safe_load(f.read() ) # Get to the API doc lowercase__ = 0 while content[api_idx]["title"] != "API": api_idx += 1 lowercase__ = content[api_idx]['''sections'''] # Then to the model doc lowercase__ = 0 while api_doc[pipeline_idx]["title"] != "Pipelines": pipeline_idx += 1 lowercase__ = False lowercase__ = api_doc[pipeline_idx]['''sections'''] lowercase__ = [] # sort sub pipeline docs for pipeline_doc in pipeline_docs: if "section" in pipeline_doc: lowercase__ = pipeline_doc['''section'''] lowercase__ = clean_doc_toc(SCREAMING_SNAKE_CASE ) if overwrite: lowercase__ = new_sub_pipeline_doc new_pipeline_docs.append(SCREAMING_SNAKE_CASE ) # sort overall pipeline doc lowercase__ = clean_doc_toc(SCREAMING_SNAKE_CASE ) if new_pipeline_docs != pipeline_docs: lowercase__ = True if overwrite: lowercase__ = new_pipeline_docs if diff: if overwrite: lowercase__ = api_doc with 
open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as f: f.write(yaml.dump(SCREAMING_SNAKE_CASE , allow_unicode=SCREAMING_SNAKE_CASE ) ) else: raise ValueError( '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' ) if __name__ == "__main__": lowerCAmelCase = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') lowerCAmelCase = parser.parse_args() check_scheduler_doc(args.fix_and_overwrite) check_pipeline_doc(args.fix_and_overwrite)
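# Editor's toy illustration of the ordering rule `clean_doc_toc` enforces above:
# any "Overview" entry comes first, the rest are sorted alphabetically by title.
docs = [
    {"local": "beta", "title": "Beta"},
    {"local": "overview", "title": "Overview"},
    {"local": "alpha", "title": "Alpha"},
]
overview = [d for d in docs if d["title"].lower() == "overview"]
rest = sorted((d for d in docs if d["title"].lower() != "overview"), key=lambda d: d["title"].lower())
print(overview + rest)  # Overview, then Alpha, Beta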
359
from abc import ABC, abstractmethod from typing import List, Optional class _a ( UpperCamelCase__ ): def __init__( self: Optional[int] ) -> Union[str, Any]: """simple docstring""" self.test() def lowerCamelCase_ ( self: List[Any] ) -> List[Any]: """simple docstring""" lowercase__ = 0 lowercase__ = False while not completed: if counter == 1: self.reset() lowercase__ = self.advance() if not self.does_advance(UpperCamelCase_ ): raise Exception( '''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' ) lowercase__ , lowercase__ , lowercase__ = self.update(UpperCamelCase_ ) counter += 1 if counter > 10_000: raise Exception('''update() does not fulfill the constraint.''' ) if self.remaining() != 0: raise Exception('''Custom Constraint is not defined correctly.''' ) @abstractmethod def lowerCamelCase_ ( self: int ) -> Any: """simple docstring""" raise NotImplementedError( f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) @abstractmethod def lowerCamelCase_ ( self: Tuple , UpperCamelCase_: int ) -> Optional[int]: """simple docstring""" raise NotImplementedError( f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) @abstractmethod def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: int ) -> str: """simple docstring""" raise NotImplementedError( f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) @abstractmethod def lowerCamelCase_ ( self: Tuple ) -> List[Any]: """simple docstring""" raise NotImplementedError( f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) @abstractmethod def lowerCamelCase_ ( self: Any ) -> Tuple: """simple docstring""" raise NotImplementedError( f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) @abstractmethod def lowerCamelCase_ ( self: Tuple , UpperCamelCase_: int=False ) -> Any: """simple docstring""" raise NotImplementedError( f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) class _a ( UpperCamelCase__ ): def __init__( self: str , UpperCamelCase_: List[int] ) -> Tuple: """simple docstring""" super(UpperCamelCase_ , self ).__init__() if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or len(UpperCamelCase_ ) == 0: raise ValueError(f'`token_ids` has to be a non-empty list, but is {token_ids}.' ) if any((not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or token_id < 0) for token_id in token_ids ): raise ValueError(f'Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.' 
) lowercase__ = token_ids lowercase__ = len(self.token_ids ) lowercase__ = -1 # the index of the currently fulfilled step lowercase__ = False def lowerCamelCase_ ( self: Tuple ) -> Tuple: """simple docstring""" if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: int ) -> Optional[Any]: """simple docstring""" if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): raise ValueError(f'`token_id` has to be an `int`, but is {token_id} of type {type(UpperCamelCase_ )}' ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: int ) -> Dict: """simple docstring""" if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): raise ValueError(f'`token_id` has to be an `int`, but is {token_id} of type {type(UpperCamelCase_ )}' ) lowercase__ = False lowercase__ = False lowercase__ = False if self.does_advance(UpperCamelCase_ ): self.fulfilled_idx += 1 lowercase__ = True if self.fulfilled_idx == (self.seqlen - 1): lowercase__ = True lowercase__ = completed else: # failed to make progress. lowercase__ = True self.reset() return stepped, completed, reset def lowerCamelCase_ ( self: Tuple ) -> int: """simple docstring""" lowercase__ = False lowercase__ = 0 def lowerCamelCase_ ( self: Any ) -> Dict: """simple docstring""" return self.seqlen - (self.fulfilled_idx + 1) def lowerCamelCase_ ( self: Any , UpperCamelCase_: Union[str, Any]=False ) -> Tuple: """simple docstring""" lowercase__ = PhrasalConstraint(self.token_ids ) if stateful: lowercase__ = self.seqlen lowercase__ = self.fulfilled_idx lowercase__ = self.completed return new_constraint class _a : def __init__( self: Union[str, Any] , UpperCamelCase_: List[List[int]] , UpperCamelCase_: int=True ) -> int: """simple docstring""" lowercase__ = max([len(UpperCamelCase_ ) for one in nested_token_ids] ) lowercase__ = {} for token_ids in nested_token_ids: lowercase__ = root for tidx, token_id in enumerate(UpperCamelCase_ ): if token_id not in level: lowercase__ = {} lowercase__ = level[token_id] if no_subsets and self.has_subsets(UpperCamelCase_ , UpperCamelCase_ ): raise ValueError( '''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is''' f' {nested_token_ids}.' 
) lowercase__ = root def lowerCamelCase_ ( self: int , UpperCamelCase_: str ) -> Any: """simple docstring""" lowercase__ = self.trie for current_token in current_seq: lowercase__ = start[current_token] lowercase__ = list(start.keys() ) return next_tokens def lowerCamelCase_ ( self: Any , UpperCamelCase_: Tuple ) -> Optional[Any]: """simple docstring""" lowercase__ = self.next_tokens(UpperCamelCase_ ) return len(UpperCamelCase_ ) == 0 def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: Optional[Any] ) -> Any: """simple docstring""" lowercase__ = list(root.values() ) if len(UpperCamelCase_ ) == 0: return 1 else: return sum([self.count_leaves(UpperCamelCase_ ) for nn in next_nodes] ) def lowerCamelCase_ ( self: Any , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int ) -> Tuple: """simple docstring""" lowercase__ = self.count_leaves(UpperCamelCase_ ) return len(UpperCamelCase_ ) != leaf_count class _a ( UpperCamelCase__ ): def __init__( self: Optional[int] , UpperCamelCase_: List[List[int]] ) -> List[Any]: """simple docstring""" super(UpperCamelCase_ , self ).__init__() if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or len(UpperCamelCase_ ) == 0: raise ValueError(f'`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.' ) if any(not isinstance(UpperCamelCase_ , UpperCamelCase_ ) for token_ids in nested_token_ids ): raise ValueError(f'`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.' ) if any( any((not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or token_id < 0) for token_id in token_ids ) for token_ids in nested_token_ids ): raise ValueError( f'Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.' ) lowercase__ = DisjunctiveTrie(UpperCamelCase_ ) lowercase__ = nested_token_ids lowercase__ = self.trie.max_height lowercase__ = [] lowercase__ = False def lowerCamelCase_ ( self: Union[str, Any] ) -> str: """simple docstring""" lowercase__ = self.trie.next_tokens(self.current_seq ) if len(UpperCamelCase_ ) == 0: return None else: return token_list def lowerCamelCase_ ( self: str , UpperCamelCase_: int ) -> Tuple: """simple docstring""" if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): raise ValueError(f'`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCamelCase_ )}' ) lowercase__ = self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def lowerCamelCase_ ( self: int , UpperCamelCase_: int ) -> Dict: """simple docstring""" if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): raise ValueError(f'`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCamelCase_ )}' ) lowercase__ = False lowercase__ = False lowercase__ = False if self.does_advance(UpperCamelCase_ ): self.current_seq.append(UpperCamelCase_ ) lowercase__ = True else: lowercase__ = True self.reset() lowercase__ = self.trie.reached_leaf(self.current_seq ) lowercase__ = completed return stepped, completed, reset def lowerCamelCase_ ( self: List[str] ) -> List[Any]: """simple docstring""" lowercase__ = False lowercase__ = [] def lowerCamelCase_ ( self: List[str] ) -> Optional[Any]: """simple docstring""" if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def lowerCamelCase_ ( self: Any , UpperCamelCase_: Optional[Any]=False ) -> str: """simple docstring""" lowercase__ = DisjunctiveConstraint(self.token_ids ) if stateful: lowercase__ = self.seqlen lowercase__ = self.current_seq 
lowercase__ = self.completed return new_constraint class _a : def __init__( self: int , UpperCamelCase_: List[Constraint] ) -> Dict: """simple docstring""" lowercase__ = constraints # max # of steps required to fulfill a given constraint lowercase__ = max([c.seqlen for c in constraints] ) lowercase__ = len(UpperCamelCase_ ) lowercase__ = False self.init_state() def lowerCamelCase_ ( self: List[str] ) -> str: """simple docstring""" lowercase__ = [] lowercase__ = None lowercase__ = [constraint.copy(stateful=UpperCamelCase_ ) for constraint in self.constraints] def lowerCamelCase_ ( self: str ) -> Any: """simple docstring""" lowercase__ = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def lowerCamelCase_ ( self: Union[str, Any] ) -> List[Any]: """simple docstring""" lowercase__ = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" lowercase__ = constraint.advance() if isinstance(UpperCamelCase_ , UpperCamelCase_ ): token_list.append(UpperCamelCase_ ) elif isinstance(UpperCamelCase_ , UpperCamelCase_ ): token_list.extend(UpperCamelCase_ ) else: lowercase__ = self.inprogress_constraint.advance() if isinstance(UpperCamelCase_ , UpperCamelCase_ ): token_list.append(UpperCamelCase_ ) elif isinstance(UpperCamelCase_ , UpperCamelCase_ ): token_list.extend(UpperCamelCase_ ) if len(UpperCamelCase_ ) == 0: return None else: return token_list def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: Optional[List[int]] ) -> Optional[int]: """simple docstring""" self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint lowercase__ , lowercase__ = self.add(UpperCamelCase_ ) # the entire list of constraints are fulfilled if self.completed: break def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: int ) -> int: """simple docstring""" if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): raise ValueError(f'`token_id` should be an `int`, but is `{token_id}`.' ) lowercase__ , lowercase__ = False, False if self.completed: lowercase__ = True lowercase__ = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state lowercase__ , lowercase__ , lowercase__ = self.inprogress_constraint.update(UpperCamelCase_ ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=UpperCamelCase_ ) ) lowercase__ = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. self.complete_constraints.append(self.inprogress_constraint ) lowercase__ = None if len(self.pending_constraints ) == 0: # we're done! lowercase__ = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? 
for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(UpperCamelCase_ ): lowercase__ , lowercase__ , lowercase__ = pending_constraint.update(UpperCamelCase_ ) if not stepped: raise Exception( '''`constraint.update(token_id)` is not yielding incremental progress, ''' '''even though `constraint.does_advance(token_id)` is true.''' ) if complete: self.complete_constraints.append(UpperCamelCase_ ) lowercase__ = None if not complete and stepped: lowercase__ = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". lowercase__ = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. lowercase__ = True break # prevent accidentally stepping through multiple constraints with just one token. return complete, stepped def lowerCamelCase_ ( self: Any , UpperCamelCase_: Optional[Any]=True ) -> Dict: """simple docstring""" lowercase__ = ConstraintListState(self.constraints ) # we actually never though self.constraints objects # throughout this process. So it's at initialization state. if stateful: lowercase__ = [ constraint.copy(stateful=UpperCamelCase_ ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: lowercase__ = self.inprogress_constraint.copy(stateful=UpperCamelCase_ ) lowercase__ = [constraint.copy() for constraint in self.pending_constraints] return new_state
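# Editor's usage sketch for the constraint classes above, assuming the mangled
# local assignments were originally attribute assignments as in transformers'
# beam-search constraints (the token ids are made up):
constraint = PhrasalConstraint([5, 6, 7])
stepped, completed, reset = constraint.update(5)  # first token of the phrase
print(stepped, completed, reset, constraint.remaining())  # True False False 2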
93
0
"""simple docstring""" from datasets.utils.patching import _PatchedModuleObj, patch_submodule from . import _test_patching def SCREAMING_SNAKE_CASE__ ( ) -> Union[str, Any]: import os as original_os from os import path as original_path from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join lowercase__: int = '''__test_patch_submodule_mock__''' with patch_submodule(_test_patching , '''os.path.join''' , __UpperCAmelCase ): # Every way to access os.path.join must be patched, and the rest must stay untouched # check os.path.join assert isinstance(_test_patching.os , _PatchedModuleObj ) assert isinstance(_test_patching.os.path , _PatchedModuleObj ) assert _test_patching.os.path.join is mock # check path.join assert isinstance(_test_patching.path , _PatchedModuleObj ) assert _test_patching.path.join is mock # check join assert _test_patching.join is mock # check that the other attributes are untouched assert _test_patching.os.rename is original_rename assert _test_patching.path.dirname is original_dirname assert _test_patching.os.path.dirname is original_dirname # Even renamed modules or objects must be patched # check renamed_os.path.join assert isinstance(_test_patching.renamed_os , _PatchedModuleObj ) assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj ) assert _test_patching.renamed_os.path.join is mock # check renamed_path.join assert isinstance(_test_patching.renamed_path , _PatchedModuleObj ) assert _test_patching.renamed_path.join is mock # check renamed_join assert _test_patching.renamed_join is mock # check that the other attributes are untouched assert _test_patching.renamed_os.rename is original_rename assert _test_patching.renamed_path.dirname is original_dirname assert _test_patching.renamed_os.path.dirname is original_dirname # check that everthing is back to normal when the patch is over assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join def SCREAMING_SNAKE_CASE__ ( ) -> Optional[Any]: assert _test_patching.open is open lowercase__: Any = '''__test_patch_submodule_builtin_mock__''' # _test_patching has "open" in its globals assert _test_patching.open is open with patch_submodule(_test_patching , '''open''' , __UpperCAmelCase ): assert _test_patching.open is mock # check that everthing is back to normal when the patch is over assert _test_patching.open is open def SCREAMING_SNAKE_CASE__ ( ) -> int: # pandas.read_csv is not present in _test_patching lowercase__: Union[str, Any] = '''__test_patch_submodule_missing_mock__''' with patch_submodule(_test_patching , '''pandas.read_csv''' , __UpperCAmelCase ): pass def SCREAMING_SNAKE_CASE__ ( ) -> List[str]: # builtin should always be mocked even if they're not in the globals # in case they're loaded at one point lowercase__: Tuple = '''__test_patch_submodule_missing_builtin_mock__''' # _test_patching doesn't have "len" in its globals assert getattr(_test_patching , '''len''' , __UpperCAmelCase ) is None with patch_submodule(_test_patching , 
'''len''' , __UpperCAmelCase ): assert _test_patching.len is mock assert _test_patching.len is len def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]: lowercase__: Union[str, Any] = '''__test_patch_submodule_start_and_stop_mock__''' lowercase__: int = patch_submodule(_test_patching , '''open''' , __UpperCAmelCase ) assert _test_patching.open is open patch.start() assert _test_patching.open is mock patch.stop() assert _test_patching.open is open def SCREAMING_SNAKE_CASE__ ( ) -> Union[str, Any]: from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join lowercase__: List[Any] = '''__test_patch_submodule_successive_join__''' lowercase__: Tuple = '''__test_patch_submodule_successive_dirname__''' lowercase__: List[str] = '''__test_patch_submodule_successive_rename__''' assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename with patch_submodule(_test_patching , '''os.path.join''' , __UpperCAmelCase ): with patch_submodule(_test_patching , '''os.rename''' , __UpperCAmelCase ): with patch_submodule(_test_patching , '''os.path.dirname''' , __UpperCAmelCase ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename # try another order with patch_submodule(_test_patching , '''os.rename''' , __UpperCAmelCase ): with patch_submodule(_test_patching , '''os.path.join''' , __UpperCAmelCase ): with patch_submodule(_test_patching , '''os.path.dirname''' , __UpperCAmelCase ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename def SCREAMING_SNAKE_CASE__ ( ) -> Any: lowercase__: Union[str, Any] = '''__test_patch_submodule_doesnt_exist_mock__''' with patch_submodule(_test_patching , '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''' , __UpperCAmelCase ): pass with patch_submodule(_test_patching , '''os.__attribute_that_doesn_exist__''' , __UpperCAmelCase ): pass
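# Editor's self-contained analogue of the contract tested above, using the
# standard library's `unittest.mock.patch` instead of `patch_submodule`: the
# attribute resolves to the mock inside the context and is restored on exit.
import os
from unittest.mock import patch

sentinel = "__mock_join__"
with patch("os.path.join", return_value=sentinel):
    assert os.path.join("a", "b") == sentinel
assert os.path.join("a", "b") == os.sep.join(("a", "b"))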
177
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from torch import nn from torch.nn import CrossEntropyLoss from ... import AutoBackbone from ...modeling_outputs import SemanticSegmenterOutput from ...modeling_utils import PreTrainedModel from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings from ...utils.backbone_utils import BackboneMixin from .configuration_upernet import UperNetConfig __A = [ "openmmlab/upernet-convnext-tiny", # See all UperNet models at https://huggingface.co/models?filter=upernet ] # General docstring __A = "UperNetConfig" class UpperCAmelCase (nn.Module ): """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 0 , _UpperCAmelCase = False , _UpperCAmelCase = 1 , ): super().__init__() lowercase__: List[Any] = nn.Convad( in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , kernel_size=_UpperCAmelCase , padding=_UpperCAmelCase , bias=_UpperCAmelCase , dilation=_UpperCAmelCase , ) lowercase__: List[Any] = nn.BatchNormad(_UpperCAmelCase ) lowercase__: int = nn.ReLU() def _snake_case ( self , _UpperCAmelCase ): lowercase__: Dict = self.conv(_UpperCAmelCase ) lowercase__: Optional[int] = self.batch_norm(_UpperCAmelCase ) lowercase__: List[Any] = self.activation(_UpperCAmelCase ) return output class UpperCAmelCase (nn.Module ): """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): super().__init__() lowercase__: int = [ nn.AdaptiveAvgPoolad(_UpperCAmelCase ), UperNetConvModule(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 ), ] for i, layer in enumerate(self.layers ): self.add_module(str(_UpperCAmelCase ) , _UpperCAmelCase ) def _snake_case ( self , _UpperCAmelCase ): lowercase__: Any = input for layer in self.layers: lowercase__: Any = layer(_UpperCAmelCase ) return hidden_state class UpperCAmelCase (nn.Module ): """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): super().__init__() lowercase__: int = pool_scales lowercase__: Optional[Any] = align_corners lowercase__: Optional[int] = in_channels lowercase__: Optional[Any] = channels lowercase__: List[Any] = [] for i, pool_scale in enumerate(_UpperCAmelCase ): lowercase__: Optional[int] = UperNetPyramidPoolingBlock(pool_scale=_UpperCAmelCase , in_channels=_UpperCAmelCase , channels=_UpperCAmelCase ) self.blocks.append(_UpperCAmelCase ) self.add_module(str(_UpperCAmelCase ) , _UpperCAmelCase ) def _snake_case ( self , _UpperCAmelCase ): lowercase__: Union[str, Any] = [] for ppm in self.blocks: lowercase__: Tuple = ppm(_UpperCAmelCase ) lowercase__: Any = nn.functional.interpolate( _UpperCAmelCase , size=x.size()[2:] , mode='''bilinear''' , align_corners=self.align_corners ) ppm_outs.append(_UpperCAmelCase ) return ppm_outs class UpperCAmelCase (nn.Module ): """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase ): super().__init__() lowercase__: Optional[int] = config lowercase__: int = config.pool_scales # e.g. 
(1, 2, 3, 6) lowercase__: Optional[int] = in_channels lowercase__: List[str] = config.hidden_size lowercase__: List[str] = False lowercase__: List[str] = nn.Convad(self.channels , config.num_labels , kernel_size=1 ) # PSP Module lowercase__: Dict = UperNetPyramidPoolingModule( self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , ) lowercase__: int = UperNetConvModule( self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , ) # FPN Module lowercase__: List[Any] = nn.ModuleList() lowercase__: Union[str, Any] = nn.ModuleList() for in_channels in self.in_channels[:-1]: # skip the top layer lowercase__: int = UperNetConvModule(_UpperCAmelCase , self.channels , kernel_size=1 ) lowercase__: Dict = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 ) self.lateral_convs.append(_UpperCAmelCase ) self.fpn_convs.append(_UpperCAmelCase ) lowercase__: Any = UperNetConvModule( len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , ) def _snake_case ( self ): self.apply(self._init_weights ) def _snake_case ( self , _UpperCAmelCase ): if isinstance(_UpperCAmelCase , nn.Convad ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() def _snake_case ( self , _UpperCAmelCase ): lowercase__: List[str] = inputs[-1] lowercase__: str = [x] psp_outs.extend(self.psp_modules(_UpperCAmelCase ) ) lowercase__: Dict = torch.cat(_UpperCAmelCase , dim=1 ) lowercase__: Tuple = self.bottleneck(_UpperCAmelCase ) return output def _snake_case ( self , _UpperCAmelCase ): # build laterals lowercase__: Dict = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )] laterals.append(self.psp_forward(_UpperCAmelCase ) ) # build top-down path lowercase__: int = len(_UpperCAmelCase ) for i in range(used_backbone_levels - 1 , 0 , -1 ): lowercase__: str = laterals[i - 1].shape[2:] lowercase__: Optional[int] = laterals[i - 1] + nn.functional.interpolate( laterals[i] , size=_UpperCAmelCase , mode='''bilinear''' , align_corners=self.align_corners ) # build outputs lowercase__: str = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )] # append psp feature fpn_outs.append(laterals[-1] ) for i in range(used_backbone_levels - 1 , 0 , -1 ): lowercase__: Any = nn.functional.interpolate( fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='''bilinear''' , align_corners=self.align_corners ) lowercase__: int = torch.cat(_UpperCAmelCase , dim=1 ) lowercase__: Tuple = self.fpn_bottleneck(_UpperCAmelCase ) lowercase__: Dict = self.classifier(_UpperCAmelCase ) return output class UpperCAmelCase (nn.Module ): """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase = 2 , _UpperCAmelCase = 3 , _UpperCAmelCase = 1 ): super().__init__() lowercase__: Optional[Any] = config lowercase__: Optional[Any] = config.auxiliary_in_channels lowercase__: List[Any] = config.auxiliary_channels lowercase__: Tuple = config.auxiliary_num_convs lowercase__: Any = config.auxiliary_concat_input lowercase__: Optional[int] = in_index lowercase__: Tuple = (kernel_size // 2) * dilation lowercase__: Tuple = [] convs.append( UperNetConvModule( self.in_channels , self.channels , kernel_size=_UpperCAmelCase , padding=_UpperCAmelCase , dilation=_UpperCAmelCase ) ) for i in range(self.num_convs - 1 ): convs.append( UperNetConvModule( self.channels , self.channels , 
kernel_size=_UpperCAmelCase , padding=_UpperCAmelCase , dilation=_UpperCAmelCase ) ) if self.num_convs == 0: lowercase__: List[Any] = nn.Identity() else: lowercase__: Union[str, Any] = nn.Sequential(*_UpperCAmelCase ) if self.concat_input: lowercase__: Dict = UperNetConvModule( self.in_channels + self.channels , self.channels , kernel_size=_UpperCAmelCase , padding=kernel_size // 2 ) lowercase__: Union[str, Any] = nn.Convad(self.channels , config.num_labels , kernel_size=1 ) def _snake_case ( self ): self.apply(self._init_weights ) def _snake_case ( self , _UpperCAmelCase ): if isinstance(_UpperCAmelCase , nn.Convad ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() def _snake_case ( self , _UpperCAmelCase ): # just take the relevant feature maps lowercase__: Dict = encoder_hidden_states[self.in_index] lowercase__: Optional[int] = self.convs(_UpperCAmelCase ) if self.concat_input: lowercase__: Optional[int] = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) ) lowercase__: Dict = self.classifier(_UpperCAmelCase ) return output class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Dict = UperNetConfig _UpperCAmelCase :int = "pixel_values" _UpperCAmelCase :Optional[Any] = True def _snake_case ( self , _UpperCAmelCase ): if isinstance(_UpperCAmelCase , _UpperCAmelCase ): module.backbone.init_weights() module.decode_head.init_weights() module.auxiliary_head.init_weights() def _snake_case ( self ): self.backbone.init_weights() self.decode_head.init_weights() self.auxiliary_head.init_weights() def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=False ): if isinstance(_UpperCAmelCase , _UpperCAmelCase ): lowercase__: Any = value __A = R"\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n" __A = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings( "UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes." 
,_UpperCAmelCase ,) class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" def __init__( self , _UpperCAmelCase ): super().__init__(_UpperCAmelCase ) lowercase__: Optional[int] = AutoBackbone.from_config(config.backbone_config ) # Semantic segmentation head(s) lowercase__: Any = UperNetHead(_UpperCAmelCase , in_channels=self.backbone.channels ) lowercase__: Tuple = UperNetFCNHead(_UpperCAmelCase ) if config.use_auxiliary_head else None # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('''batch_size, sequence_length''' ) ) @replace_return_docstrings(output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC ) def _snake_case ( self , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , ): lowercase__: Tuple = return_dict if return_dict is not None else self.config.use_return_dict lowercase__: Any = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowercase__: str = output_attentions if output_attentions is not None else self.config.output_attentions lowercase__: List[str] = self.backbone.forward_with_filtered_kwargs( _UpperCAmelCase , output_hidden_states=_UpperCAmelCase , output_attentions=_UpperCAmelCase ) lowercase__: Tuple = outputs.feature_maps lowercase__: Union[str, Any] = self.decode_head(_UpperCAmelCase ) lowercase__: str = nn.functional.interpolate(_UpperCAmelCase , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=_UpperCAmelCase ) lowercase__: Any = None if self.auxiliary_head is not None: lowercase__: Union[str, Any] = self.auxiliary_head(_UpperCAmelCase ) lowercase__: Tuple = nn.functional.interpolate( _UpperCAmelCase , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=_UpperCAmelCase ) lowercase__: List[Any] = None if labels is not None: if self.config.num_labels == 1: raise ValueError('''The number of labels should be greater than one''' ) else: # compute weighted loss lowercase__: List[str] = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index ) lowercase__: Optional[Any] = loss_fct(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: Dict = loss_fct(_UpperCAmelCase , _UpperCAmelCase ) lowercase__: int = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss if not return_dict: if output_hidden_states: lowercase__: Tuple = (logits,) + outputs[1:] else: lowercase__: Optional[int] = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SemanticSegmenterOutput( loss=_UpperCAmelCase , logits=_UpperCAmelCase , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
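# Editor's sketch of the pyramid-pooling step that `UperNetPyramidPoolingModule`
# implements above: pool the feature map to several grid sizes and upsample each
# back to the input resolution (the real module also applies a 1x1 conv per
# scale; shapes and pool scales here are illustrative).
import torch
import torch.nn.functional as F

x = torch.randn(1, 8, 32, 32)
pooled = [
    F.interpolate(F.adaptive_avg_pool2d(x, scale), size=x.shape[2:], mode="bilinear", align_corners=False)
    for scale in (1, 2, 3, 6)
]
print(torch.cat([x, *pooled], dim=1).shape)  # torch.Size([1, 40, 32, 32])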
177
1
"""simple docstring""" import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 SCREAMING_SNAKE_CASE__ = sys.version_info >= (3, 10) def lowerCAmelCase__ ( _UpperCamelCase : int=None , _UpperCamelCase : int=None ) -> Optional[int]: """simple docstring""" return field(default_factory=lambda: default , metadata=_a ) @dataclass class lowerCAmelCase_ : """simple docstring""" _lowerCAmelCase : int _lowerCAmelCase : float _lowerCAmelCase : str _lowerCAmelCase : bool @dataclass class lowerCAmelCase_ : """simple docstring""" _lowerCAmelCase : int = 42 _lowerCAmelCase : str = field(default="""toto""" , metadata={"""help""": """help message"""} ) @dataclass class lowerCAmelCase_ : """simple docstring""" _lowerCAmelCase : bool = False _lowerCAmelCase : bool = True _lowerCAmelCase : Optional[bool] = None class lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCAmelCase : Tuple = 'titi' _lowerCAmelCase : List[str] = 'toto' class lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCAmelCase : Tuple = 'titi' _lowerCAmelCase : List[Any] = 'toto' _lowerCAmelCase : Union[str, Any] = 42 @dataclass class lowerCAmelCase_ : """simple docstring""" _lowerCAmelCase : BasicEnum = "toto" def snake_case ( self ): """simple docstring""" snake_case = BasicEnum(self.foo ) @dataclass class lowerCAmelCase_ : """simple docstring""" _lowerCAmelCase : MixedTypeEnum = "toto" def snake_case ( self ): """simple docstring""" snake_case = MixedTypeEnum(self.foo ) @dataclass class lowerCAmelCase_ : """simple docstring""" _lowerCAmelCase : Optional[int] = None _lowerCAmelCase : Optional[float] = field(default=SCREAMING_SNAKE_CASE__ , metadata={"""help""": """help message"""} ) _lowerCAmelCase : Optional[str] = None _lowerCAmelCase : Optional[List[str]] = list_field(default=[] ) _lowerCAmelCase : Optional[List[int]] = list_field(default=[] ) @dataclass class lowerCAmelCase_ : """simple docstring""" _lowerCAmelCase : List[int] = list_field(default=[] ) _lowerCAmelCase : List[int] = list_field(default=[1, 2, 3] ) _lowerCAmelCase : List[str] = list_field(default=["""Hallo""", """Bonjour""", """Hello"""] ) _lowerCAmelCase : List[float] = list_field(default=[0.1, 0.2, 0.3] ) @dataclass class lowerCAmelCase_ : """simple docstring""" _lowerCAmelCase : List[int] = field() _lowerCAmelCase : str = field() _lowerCAmelCase : BasicEnum = field() def snake_case ( self ): """simple docstring""" snake_case = BasicEnum(self.required_enum ) @dataclass class lowerCAmelCase_ : """simple docstring""" _lowerCAmelCase : int _lowerCAmelCase : "BasicEnum" = field() _lowerCAmelCase : "Optional[bool]" = None _lowerCAmelCase : "str" = field(default="""toto""" , metadata={"""help""": """help message"""} ) _lowerCAmelCase : "List[str]" = list_field(default=["""Hallo""", """Bonjour""", """Hello"""] ) if is_python_no_less_than_3_10: @dataclass class lowerCAmelCase_ : """simple docstring""" _lowerCAmelCase : bool = False _lowerCAmelCase : bool = True _lowerCAmelCase : bool | None = None @dataclass class lowerCAmelCase_ : """simple docstring""" _lowerCAmelCase : int | 
None = None _lowerCAmelCase : float | None = field(default=SCREAMING_SNAKE_CASE__ , metadata={"""help""": """help message"""} ) _lowerCAmelCase : str | None = None _lowerCAmelCase : list[str] | None = list_field(default=[] ) _lowerCAmelCase : list[int] | None = list_field(default=[] ) class lowerCAmelCase_ ( unittest.TestCase ): """simple docstring""" def snake_case ( self , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): snake_case = {k: v for k, v in vars(_SCREAMING_SNAKE_CASE ).items() if k != "container"} snake_case = {k: v for k, v in vars(_SCREAMING_SNAKE_CASE ).items() if k != "container"} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get('choices' , _SCREAMING_SNAKE_CASE ) and yy.get('choices' , _SCREAMING_SNAKE_CASE ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx['type'](_SCREAMING_SNAKE_CASE ) , yy['type'](_SCREAMING_SNAKE_CASE ) ) del xx["type"], yy["type"] self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def snake_case ( self ): """simple docstring""" snake_case = HfArgumentParser(_SCREAMING_SNAKE_CASE ) snake_case = argparse.ArgumentParser() expected.add_argument('--foo' , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE ) expected.add_argument('--bar' , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE ) expected.add_argument('--baz' , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE ) expected.add_argument('--flag' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , const=_SCREAMING_SNAKE_CASE , nargs='?' ) self.argparsersEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) snake_case = ["--foo", "1", "--baz", "quux", "--bar", "0.5"] (snake_case ) = parser.parse_args_into_dataclasses(_SCREAMING_SNAKE_CASE , look_for_args_file=_SCREAMING_SNAKE_CASE ) self.assertFalse(example.flag ) def snake_case ( self ): """simple docstring""" snake_case = HfArgumentParser(_SCREAMING_SNAKE_CASE ) snake_case = argparse.ArgumentParser() expected.add_argument('--foo' , default=42 , type=_SCREAMING_SNAKE_CASE ) expected.add_argument('--baz' , default='toto' , type=_SCREAMING_SNAKE_CASE , help='help message' ) self.argparsersEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def snake_case ( self ): """simple docstring""" snake_case = argparse.ArgumentParser() expected.add_argument('--foo' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , const=_SCREAMING_SNAKE_CASE , nargs='?' ) expected.add_argument('--baz' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , const=_SCREAMING_SNAKE_CASE , nargs='?' 
        )
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument('--no_baz' , action='store_false' , default=_SCREAMING_SNAKE_CASE , dest='baz' )
        expected.add_argument('--opt' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE )

        snake_case = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(_SCREAMING_SNAKE_CASE )

        for dataclass_type in dataclass_types:
            snake_case = HfArgumentParser(_SCREAMING_SNAKE_CASE )
            self.argparsersEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

            snake_case = parser.parse_args([] )
            self.assertEqual(_SCREAMING_SNAKE_CASE , Namespace(foo=_SCREAMING_SNAKE_CASE , baz=_SCREAMING_SNAKE_CASE , opt=_SCREAMING_SNAKE_CASE ) )

            snake_case = parser.parse_args(['--foo', '--no_baz'] )
            self.assertEqual(_SCREAMING_SNAKE_CASE , Namespace(foo=_SCREAMING_SNAKE_CASE , baz=_SCREAMING_SNAKE_CASE , opt=_SCREAMING_SNAKE_CASE ) )

            snake_case = parser.parse_args(['--foo', '--baz'] )
            self.assertEqual(_SCREAMING_SNAKE_CASE , Namespace(foo=_SCREAMING_SNAKE_CASE , baz=_SCREAMING_SNAKE_CASE , opt=_SCREAMING_SNAKE_CASE ) )

            snake_case = parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'] )
            self.assertEqual(_SCREAMING_SNAKE_CASE , Namespace(foo=_SCREAMING_SNAKE_CASE , baz=_SCREAMING_SNAKE_CASE , opt=_SCREAMING_SNAKE_CASE ) )

            snake_case = parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'] )
            self.assertEqual(_SCREAMING_SNAKE_CASE , Namespace(foo=_SCREAMING_SNAKE_CASE , baz=_SCREAMING_SNAKE_CASE , opt=_SCREAMING_SNAKE_CASE ) )

    def snake_case ( self ):
        """simple docstring"""
        snake_case = HfArgumentParser(_SCREAMING_SNAKE_CASE )
        snake_case = argparse.ArgumentParser()
        expected.add_argument(
            '--foo' , default='toto' , choices=['titi', 'toto', 42] , type=make_choice_type_function(['titi', 'toto', 42] ) , )
        self.argparsersEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

        snake_case = parser.parse_args([] )
        self.assertEqual(args.foo , 'toto' )
        snake_case = parser.parse_args_into_dataclasses([] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )

        snake_case = parser.parse_args(['--foo', 'titi'] )
        self.assertEqual(args.foo , 'titi' )
        snake_case = parser.parse_args_into_dataclasses(['--foo', 'titi'] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )

        snake_case = parser.parse_args(['--foo', '42'] )
        self.assertEqual(args.foo , 42 )
        snake_case = parser.parse_args_into_dataclasses(['--foo', '42'] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )

    def snake_case ( self ):
        """simple docstring"""

        @dataclass
        class lowerCAmelCase_ :
            """simple docstring"""

            _lowerCAmelCase : Literal["titi", "toto", 42] = "toto"

        snake_case = HfArgumentParser(_SCREAMING_SNAKE_CASE )
        snake_case = argparse.ArgumentParser()
        expected.add_argument(
            '--foo' , default='toto' , choices=('titi', 'toto', 42) , type=make_choice_type_function(['titi', 'toto', 42] ) , )
        self.argparsersEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

        snake_case = parser.parse_args([] )
        self.assertEqual(args.foo , 'toto' )

        snake_case = parser.parse_args(['--foo', 'titi'] )
        self.assertEqual(args.foo , 'titi' )

        snake_case = parser.parse_args(['--foo', '42'] )
        self.assertEqual(args.foo , 42 )

    def snake_case ( self ):
        """simple docstring"""
        snake_case = HfArgumentParser(_SCREAMING_SNAKE_CASE )
        snake_case = argparse.ArgumentParser()
        expected.add_argument('--foo_int' , nargs='+' , default=[] , type=_SCREAMING_SNAKE_CASE )
        expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=_SCREAMING_SNAKE_CASE )
        expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=_SCREAMING_SNAKE_CASE )
        expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=_SCREAMING_SNAKE_CASE )
        self.argparsersEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

        snake_case = parser.parse_args([] )
        self.assertEqual(
            _SCREAMING_SNAKE_CASE , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3] ) , )

        snake_case = parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split() )
        self.assertEqual(_SCREAMING_SNAKE_CASE , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7] ) )

    def snake_case ( self ):
        """simple docstring"""
        snake_case = argparse.ArgumentParser()
        expected.add_argument('--foo' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE )
        expected.add_argument('--bar' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , help='help message' )
        expected.add_argument('--baz' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE )
        expected.add_argument('--ces' , nargs='+' , default=[] , type=_SCREAMING_SNAKE_CASE )
        expected.add_argument('--des' , nargs='+' , default=[] , type=_SCREAMING_SNAKE_CASE )

        snake_case = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(_SCREAMING_SNAKE_CASE )

        for dataclass_type in dataclass_types:
            snake_case = HfArgumentParser(_SCREAMING_SNAKE_CASE )
            self.argparsersEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

            snake_case = parser.parse_args([] )
            self.assertEqual(_SCREAMING_SNAKE_CASE , Namespace(foo=_SCREAMING_SNAKE_CASE , bar=_SCREAMING_SNAKE_CASE , baz=_SCREAMING_SNAKE_CASE , ces=[] , des=[] ) )

            snake_case = parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split() )
            self.assertEqual(_SCREAMING_SNAKE_CASE , Namespace(foo=12 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3] ) )

    def snake_case ( self ):
        """simple docstring"""
        snake_case = HfArgumentParser(_SCREAMING_SNAKE_CASE )
        snake_case = argparse.ArgumentParser()
        expected.add_argument('--required_list' , nargs='+' , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE )
        expected.add_argument('--required_str' , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE )
        expected.add_argument(
            '--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=_SCREAMING_SNAKE_CASE , )
        self.argparsersEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

    def snake_case ( self ):
        """simple docstring"""
        snake_case = HfArgumentParser(_SCREAMING_SNAKE_CASE )
        snake_case = argparse.ArgumentParser()
        expected.add_argument('--foo' , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE )
        expected.add_argument(
            '--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=_SCREAMING_SNAKE_CASE , )
        expected.add_argument('--opt' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE )
        expected.add_argument('--baz' , default='toto' , type=_SCREAMING_SNAKE_CASE , help='help message' )
        expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=_SCREAMING_SNAKE_CASE )
        self.argparsersEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

    def snake_case ( self ):
        """simple docstring"""
        snake_case = HfArgumentParser(_SCREAMING_SNAKE_CASE )
        snake_case = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        snake_case = parser.parse_dict(_SCREAMING_SNAKE_CASE )[0]
        snake_case = BasicExample(**_SCREAMING_SNAKE_CASE )
        self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

    def snake_case ( self ):
        """simple docstring"""
        snake_case = HfArgumentParser(_SCREAMING_SNAKE_CASE )
        snake_case = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }
        self.assertRaises(_SCREAMING_SNAKE_CASE , parser.parse_dict , _SCREAMING_SNAKE_CASE , allow_extra_keys=_SCREAMING_SNAKE_CASE )

    def snake_case ( self ):
        """simple docstring"""
        snake_case = HfArgumentParser(_SCREAMING_SNAKE_CASE )
        snake_case = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            snake_case = os.path.join(_SCREAMING_SNAKE_CASE , 'temp_json' )
            os.mkdir(_SCREAMING_SNAKE_CASE )
            with open(temp_local_path + '.json' , 'w+' ) as f:
                json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            snake_case = parser.parse_yaml_file(Path(temp_local_path + '.json' ) )[0]

        snake_case = BasicExample(**_SCREAMING_SNAKE_CASE )
        self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

    def snake_case ( self ):
        """simple docstring"""
        snake_case = HfArgumentParser(_SCREAMING_SNAKE_CASE )
        snake_case = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            snake_case = os.path.join(_SCREAMING_SNAKE_CASE , 'temp_yaml' )
            os.mkdir(_SCREAMING_SNAKE_CASE )
            with open(temp_local_path + '.yaml' , 'w+' ) as f:
                yaml.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            snake_case = parser.parse_yaml_file(Path(temp_local_path + '.yaml' ) )[0]

        snake_case = BasicExample(**_SCREAMING_SNAKE_CASE )
        self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

    def snake_case ( self ):
        """simple docstring"""
        snake_case = HfArgumentParser(_SCREAMING_SNAKE_CASE )
        self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
364
"""simple docstring""" import cmath import math def lowerCAmelCase__ ( _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : float ) -> complex: """simple docstring""" snake_case = math.radians(_UpperCamelCase ) snake_case = math.radians(_UpperCamelCase ) # Convert voltage and current to rectangular form snake_case = cmath.rect(_UpperCamelCase , _UpperCamelCase ) snake_case = cmath.rect(_UpperCamelCase , _UpperCamelCase ) # Calculate apparent power return voltage_rect * current_rect if __name__ == "__main__": import doctest doctest.testmod()
149
0
import os
import warnings
from typing import List, Optional

from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig


lowerCAmelCase = logging.get_logger(__name__)


class _a :
    def __init__( self: str , UpperCamelCase_: Any , UpperCamelCase_: str ) -> Any:
        """simple docstring"""
        lowercase__ = question_encoder
        lowercase__ = generator
        lowercase__ = self.question_encoder

    def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: Optional[Any] ) -> Optional[int]:
        """simple docstring"""
        if os.path.isfile(UpperCamelCase_ ):
            raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file' )
        os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
        lowercase__ = os.path.join(UpperCamelCase_ , '''question_encoder_tokenizer''' )
        lowercase__ = os.path.join(UpperCamelCase_ , '''generator_tokenizer''' )
        self.question_encoder.save_pretrained(UpperCamelCase_ )
        self.generator.save_pretrained(UpperCamelCase_ )

    @classmethod
    def lowerCamelCase_ ( cls: Any , UpperCamelCase_: List[str] , **UpperCamelCase_: Dict ) -> List[Any]:
        """simple docstring"""
        from ..auto.tokenization_auto import AutoTokenizer

        lowercase__ = kwargs.pop('''config''' , UpperCamelCase_ )
        if config is None:
            lowercase__ = RagConfig.from_pretrained(UpperCamelCase_ )
        lowercase__ = AutoTokenizer.from_pretrained(
            UpperCamelCase_ , config=config.question_encoder , subfolder='''question_encoder_tokenizer''' )
        lowercase__ = AutoTokenizer.from_pretrained(
            UpperCamelCase_ , config=config.generator , subfolder='''generator_tokenizer''' )
        return cls(question_encoder=UpperCamelCase_ , generator=UpperCamelCase_ )

    def __call__( self: str , *UpperCamelCase_: Union[str, Any] , **UpperCamelCase_: Union[str, Any] ) -> Optional[int]:
        """simple docstring"""
        return self.current_tokenizer(*UpperCamelCase_ , **UpperCamelCase_ )

    def lowerCamelCase_ ( self: Any , *UpperCamelCase_: List[Any] , **UpperCamelCase_: str ) -> Any:
        """simple docstring"""
        return self.generator.batch_decode(*UpperCamelCase_ , **UpperCamelCase_ )

    def lowerCamelCase_ ( self: Any , *UpperCamelCase_: str , **UpperCamelCase_: Tuple ) -> Optional[Any]:
        """simple docstring"""
        return self.generator.decode(*UpperCamelCase_ , **UpperCamelCase_ )

    def lowerCamelCase_ ( self: Tuple ) -> List[str]:
        """simple docstring"""
        lowercase__ = self.question_encoder

    def lowerCamelCase_ ( self: str ) -> str:
        """simple docstring"""
        lowercase__ = self.generator

    def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: List[str] , UpperCamelCase_: Optional[List[str]] = None , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: str = "longest" , UpperCamelCase_: str = None , UpperCamelCase_: bool = True , **UpperCamelCase_: Union[str, Any] , ) -> BatchEncoding:
        """simple docstring"""
        warnings.warn(
            '''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
            '''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
            '''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
            '''details''' , UpperCamelCase_ , )
        if max_length is None:
            lowercase__ = self.current_tokenizer.model_max_length
        lowercase__ = self(
            UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_tensors=UpperCamelCase_ , max_length=UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , **UpperCamelCase_ , )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            lowercase__ = self.current_tokenizer.model_max_length
        lowercase__ = self(
            text_target=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_tensors=UpperCamelCase_ , padding=UpperCamelCase_ , max_length=UpperCamelCase_ , truncation=UpperCamelCase_ , **UpperCamelCase_ , )
        lowercase__ = labels['''input_ids''']
        return model_inputs
110
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image

from diffusers import (
    DDIMScheduler,
    KandinskyVaaImgaImgPipeline,
    KandinskyVaaPriorPipeline,
    UNetaDConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class _a ( UpperCamelCase__ , unittest.TestCase ):
    _lowercase : Union[str, Any] = KandinskyVaaImgaImgPipeline
    _lowercase : Tuple = ['''image_embeds''', '''negative_image_embeds''', '''image''']
    _lowercase : Any = [
        '''image_embeds''',
        '''negative_image_embeds''',
        '''image''',
    ]
    _lowercase : Union[str, Any] = [
        '''generator''',
        '''height''',
        '''width''',
        '''strength''',
        '''guidance_scale''',
        '''num_inference_steps''',
        '''return_dict''',
        '''guidance_scale''',
        '''num_images_per_prompt''',
        '''output_type''',
        '''return_dict''',
    ]
    _lowercase : Optional[Any] = False

    @property
    def lowerCamelCase_ ( self: Union[str, Any] ) -> Dict:
        """simple docstring"""
        return 32

    @property
    def lowerCamelCase_ ( self: Optional[int] ) -> Optional[Any]:
        """simple docstring"""
        return 32

    @property
    def lowerCamelCase_ ( self: Any ) -> Any:
        """simple docstring"""
        return self.time_input_dim

    @property
    def lowerCamelCase_ ( self: Tuple ) -> Any:
        """simple docstring"""
        return self.time_input_dim * 4

    @property
    def lowerCamelCase_ ( self: List[Any] ) -> Optional[Any]:
        """simple docstring"""
        return 100

    @property
    def lowerCamelCase_ ( self: int ) -> int:
        """simple docstring"""
        torch.manual_seed(0 )
        lowercase__ = {
            '''in_channels''': 4,
            # Out channels is double in channels because predicts mean and variance
            '''out_channels''': 8,
            '''addition_embed_type''': '''image''',
            '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
            '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
            '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
            '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
            '''layers_per_block''': 1,
            '''encoder_hid_dim''': self.text_embedder_hidden_size,
            '''encoder_hid_dim_type''': '''image_proj''',
            '''cross_attention_dim''': self.cross_attention_dim,
            '''attention_head_dim''': 4,
            '''resnet_time_scale_shift''': '''scale_shift''',
            '''class_embed_type''': None,
        }
        lowercase__ = UNetaDConditionModel(**UpperCamelCase_ )
        return model

    @property
    def lowerCamelCase_ ( self: Optional[int] ) -> Union[str, Any]:
        """simple docstring"""
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def lowerCamelCase_ ( self: Optional[Any] ) -> int:
        """simple docstring"""
        torch.manual_seed(0 )
        lowercase__ = VQModel(**self.dummy_movq_kwargs )
        return model

    def lowerCamelCase_ ( self: Optional[int] ) -> Optional[int]:
        """simple docstring"""
        lowercase__ = self.dummy_unet
        lowercase__ = self.dummy_movq
        lowercase__ = {
            '''num_train_timesteps''': 1_000,
            '''beta_schedule''': '''linear''',
            '''beta_start''': 0.00085,
            '''beta_end''': 0.012,
            '''clip_sample''': False,
            '''set_alpha_to_one''': False,
            '''steps_offset''': 0,
            '''prediction_type''': '''epsilon''',
            '''thresholding''': False,
        }
        lowercase__ = DDIMScheduler(**UpperCamelCase_ )
        lowercase__ = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''movq''': movq,
        }
        return components

    def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[int]=0 ) -> Optional[int]:
        """simple docstring"""
        lowercase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
        lowercase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            UpperCamelCase_ )
        # create init_image
        lowercase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
        lowercase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowercase__ = Image.fromarray(np.uinta(UpperCamelCase_ ) ).convert('''RGB''' ).resize((256, 256) )
        if str(UpperCamelCase_ ).startswith('''mps''' ):
            lowercase__ = torch.manual_seed(UpperCamelCase_ )
        else:
            lowercase__ = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
        lowercase__ = {
            '''image''': init_image,
            '''image_embeds''': image_embeds,
            '''negative_image_embeds''': negative_image_embeds,
            '''generator''': generator,
            '''height''': 64,
            '''width''': 64,
            '''num_inference_steps''': 10,
            '''guidance_scale''': 7.0,
            '''strength''': 0.2,
            '''output_type''': '''np''',
        }
        return inputs

    def lowerCamelCase_ ( self: Optional[int] ) -> Dict:
        """simple docstring"""
        lowercase__ = '''cpu'''
        lowercase__ = self.get_dummy_components()
        lowercase__ = self.pipeline_class(**UpperCamelCase_ )
        lowercase__ = pipe.to(UpperCamelCase_ )
        pipe.set_progress_bar_config(disable=UpperCamelCase_ )
        lowercase__ = pipe(**self.get_dummy_inputs(UpperCamelCase_ ) )
        lowercase__ = output.images
        lowercase__ = pipe(
            **self.get_dummy_inputs(UpperCamelCase_ ) , return_dict=UpperCamelCase_ , )[0]
        lowercase__ = image[0, -3:, -3:, -1]
        lowercase__ = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        lowercase__ = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
        ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'


@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
    def lowerCamelCase_ ( self: str ) -> List[Any]:
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowerCamelCase_ ( self: List[str] ) -> Union[str, Any]:
        """simple docstring"""
        lowercase__ = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/kandinskyv22_img2img_frog.npy''' )
        lowercase__ = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinsky/cat.png''' )
        lowercase__ = '''A red cartoon frog, 4k'''
        lowercase__ = KandinskyVaaPriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
        pipe_prior.to(UpperCamelCase_ )
        lowercase__ = KandinskyVaaImgaImgPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa )
        lowercase__ = pipeline.to(UpperCamelCase_ )
        pipeline.set_progress_bar_config(disable=UpperCamelCase_ )
        lowercase__ = torch.Generator(device='''cpu''' ).manual_seed(0 )
        lowercase__ , lowercase__ = pipe_prior(
            UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
        lowercase__ = pipeline(
            image=UpperCamelCase_ , image_embeds=UpperCamelCase_ , negative_image_embeds=UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , )
        lowercase__ = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(UpperCamelCase_ , UpperCamelCase_ )
110
1
import json
import os
import unittest

from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class lowerCamelCase_ ( _a , unittest.TestCase ):
    SCREAMING_SNAKE_CASE_ = CTRLTokenizer
    SCREAMING_SNAKE_CASE_ = False
    SCREAMING_SNAKE_CASE_ = False

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
        '''simple docstring'''
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        a = ["""adapt""", """re@@""", """a@@""", """apt""", """c@@""", """t""", """<unk>"""]
        a = dict(zip(snake_case_ ,range(len(snake_case_ ) ) ) )
        a = ["""#version: 0.2""", """a p""", """ap t</w>""", """r e""", """a d""", """ad apt</w>""", """"""]
        a = {"""unk_token""": """<unk>"""}
        a = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
        a = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(snake_case_ ) + '''\n''' )
        with open(self.merges_file ,'''w''' ,encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(snake_case_ ) )

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ,**__lowerCamelCase : List[str] ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return CTRLTokenizer.from_pretrained(self.tmpdirname ,**snake_case_ )

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ,__lowerCamelCase : Optional[int] ):
        '''simple docstring'''
        a = """adapt react readapt apt"""
        a = """adapt react readapt apt"""
        return input_text, output_text

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
        '''simple docstring'''
        a = CTRLTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
        a = """adapt react readapt apt"""
        a = """adapt re@@ a@@ c@@ t re@@ adapt apt""".split()
        a = tokenizer.tokenize(snake_case_ )
        self.assertListEqual(snake_case_ ,snake_case_ )
        a = tokens + [tokenizer.unk_token]
        a = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case_ ) ,snake_case_ )
371
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


UpperCamelCase__ : Union[str, Any] = logging.get_logger(__name__)

UpperCamelCase__ : Union[str, Any] = {
    """hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class lowerCamelCase_ ( a_ ):
    SCREAMING_SNAKE_CASE_ = 'yolos'

    def __init__( self : Union[str, Any] ,__lowerCamelCase : int=7_68 ,__lowerCamelCase : Dict=12 ,__lowerCamelCase : Union[str, Any]=12 ,__lowerCamelCase : List[Any]=30_72 ,__lowerCamelCase : int="gelu" ,__lowerCamelCase : int=0.0 ,__lowerCamelCase : str=0.0 ,__lowerCamelCase : Optional[Any]=0.02 ,__lowerCamelCase : int=1e-12 ,__lowerCamelCase : Any=[5_12, 8_64] ,__lowerCamelCase : Tuple=16 ,__lowerCamelCase : int=3 ,__lowerCamelCase : Tuple=True ,__lowerCamelCase : Optional[int]=1_00 ,__lowerCamelCase : List[Any]=True ,__lowerCamelCase : List[str]=False ,__lowerCamelCase : int=1 ,__lowerCamelCase : List[Any]=5 ,__lowerCamelCase : Optional[int]=2 ,__lowerCamelCase : int=5 ,__lowerCamelCase : str=2 ,__lowerCamelCase : Tuple=0.1 ,**__lowerCamelCase : List[Any] ,):
        '''simple docstring'''
        super().__init__(**__lowerCamelCase )
        a = hidden_size
        a = num_hidden_layers
        a = num_attention_heads
        a = intermediate_size
        a = hidden_act
        a = hidden_dropout_prob
        a = attention_probs_dropout_prob
        a = initializer_range
        a = layer_norm_eps
        a = image_size
        a = patch_size
        a = num_channels
        a = qkv_bias
        a = num_detection_tokens
        a = use_mid_position_embeddings
        a = auxiliary_loss
        # Hungarian matcher
        a = class_cost
        a = bbox_cost
        a = giou_cost
        # Loss coefficients
        a = bbox_loss_coefficient
        a = giou_loss_coefficient
        a = eos_coefficient


class lowerCamelCase_ ( a_ ):
    SCREAMING_SNAKE_CASE_ = version.parse('1.11' )

    @property
    def SCREAMING_SNAKE_CASE_ ( self : str ):
        '''simple docstring'''
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )

    @property
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
        '''simple docstring'''
        return 1e-4

    @property
    def SCREAMING_SNAKE_CASE_ ( self : str ):
        '''simple docstring'''
        return 12
330
0
"""simple docstring""" import logging import os import sys from pathlib import Path from unittest.mock import patch from parameterized import parameterized from run_eval import run_generate from run_eval_search import run_search from transformers.testing_utils import CaptureStdout, TestCasePlus, slow from utils import ROUGE_KEYS logging.basicConfig(level=logging.DEBUG) __A : Union[str, Any] = logging.getLogger() def lowercase ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' _UpperCAmelCase = '''\n'''.join(_UpperCAmelCase ) Path(_UpperCAmelCase ).open('''w''' ).writelines(_UpperCAmelCase ) __A : Tuple = '''patrickvonplaten/t5-tiny-random''' __A : List[Any] = '''sshleifer/bart-tiny-random''' __A : int = '''sshleifer/tiny-mbart''' __A : List[str] = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks class _a ( __UpperCAmelCase): """simple docstring""" def lowercase__ ( self : str , __UpperCamelCase : Dict )->Any: _UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / '''utest_input.source''' _UpperCAmelCase = input_file_name.parent / '''utest_output.txt''' assert not output_file_name.exists() _UpperCAmelCase = [''' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.'''] _dump_articles(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) _UpperCAmelCase = str(Path(self.get_auto_remove_tmp_dir() ) / '''scores.json''' ) _UpperCAmelCase = '''translation_en_to_de''' if model == T5_TINY else '''summarization''' _UpperCAmelCase = F'\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n '.split() with patch.object(__SCREAMING_SNAKE_CASE , '''argv''' , __SCREAMING_SNAKE_CASE ): run_generate() assert Path(__SCREAMING_SNAKE_CASE ).exists() # os.remove(Path(output_file_name)) def lowercase__ ( self : Tuple )->str: self.run_eval_tester(__SCREAMING_SNAKE_CASE ) @parameterized.expand([BART_TINY, MBART_TINY] ) @slow def lowercase__ ( self : Optional[Any] , __UpperCamelCase : Dict )->List[Any]: self.run_eval_tester(__SCREAMING_SNAKE_CASE ) @parameterized.expand([T5_TINY, MBART_TINY] ) @slow def lowercase__ ( self : str , __UpperCamelCase : Any )->Union[str, Any]: _UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / '''utest_input.source''' _UpperCAmelCase = input_file_name.parent / '''utest_output.txt''' assert not output_file_name.exists() _UpperCAmelCase = { '''en''': ['''Machine learning is great, isn\'t it?''', '''I like to eat bananas''', '''Tomorrow is another great day!'''], '''de''': [ '''Maschinelles Lernen ist großartig, oder?''', '''Ich esse gerne Bananen''', '''Morgen ist wieder ein toller Tag!''', ], } _UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) _UpperCAmelCase = str(tmp_dir / '''scores.json''' ) _UpperCAmelCase = str(tmp_dir / '''val.target''' ) _dump_articles(__SCREAMING_SNAKE_CASE , text['''en'''] ) _dump_articles(__SCREAMING_SNAKE_CASE , text['''de'''] ) _UpperCAmelCase = '''translation_en_to_de''' if model == T5_TINY else '''summarization''' _UpperCAmelCase = F'\n run_eval_search.py\n {model}\n {str(__SCREAMING_SNAKE_CASE )}\n {str(__SCREAMING_SNAKE_CASE )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n '.split() testargs.extend(['''--search''', '''num_beams=1:2 length_penalty=0.9:1.0'''] ) with patch.object(__SCREAMING_SNAKE_CASE , '''argv''' , 
__SCREAMING_SNAKE_CASE ): with CaptureStdout() as cs: run_search() _UpperCAmelCase = [''' num_beams | length_penalty''', model, '''Best score args'''] _UpperCAmelCase = ['''Info'''] if "translation" in task: expected_strings.append('''bleu''' ) else: expected_strings.extend(__SCREAMING_SNAKE_CASE ) for w in expected_strings: assert w in cs.out for w in un_expected_strings: assert w not in cs.out assert Path(__SCREAMING_SNAKE_CASE ).exists() os.remove(Path(__SCREAMING_SNAKE_CASE ) )
260
from collections import defaultdict
from typing import Optional

from ..image_utils import load_image
from ..utils import (
    add_end_docstrings,
    is_torch_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline


if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING

__snake_case :List[Any] = logging.get_logger(__name__)


@add_end_docstrings(__UpperCAmelCase )
class _A ( __UpperCAmelCase ):
    def __init__( self : Dict , **__SCREAMING_SNAKE_CASE : Tuple):
        '''simple docstring'''
        super().__init__(**__SCREAMING_SNAKE_CASE)
        requires_backends(self , '''vision''')
        requires_backends(self , '''torch''')
        if self.framework != "pt":
            raise ValueError(F'The {self.__class__} is only available in PyTorch.')
        self.check_model_type(__SCREAMING_SNAKE_CASE)

    def _lowerCamelCase ( self : Any , **__SCREAMING_SNAKE_CASE : Any):
        '''simple docstring'''
        __a = {}
        __a = {}
        __a = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            __a = kwargs['''points_per_batch''']
        if "points_per_crop" in kwargs:
            __a = kwargs['''points_per_crop''']
        if "crops_n_layers" in kwargs:
            __a = kwargs['''crops_n_layers''']
        if "crop_overlap_ratio" in kwargs:
            __a = kwargs['''crop_overlap_ratio''']
        if "crop_n_points_downscale_factor" in kwargs:
            __a = kwargs['''crop_n_points_downscale_factor''']
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            __a = kwargs['''pred_iou_thresh''']
        if "stability_score_offset" in kwargs:
            __a = kwargs['''stability_score_offset''']
        if "mask_threshold" in kwargs:
            __a = kwargs['''mask_threshold''']
        if "stability_score_thresh" in kwargs:
            __a = kwargs['''stability_score_thresh''']
        if "crops_nms_thresh" in kwargs:
            __a = kwargs['''crops_nms_thresh''']
        if "output_rle_mask" in kwargs:
            __a = kwargs['''output_rle_mask''']
        if "output_bboxes_mask" in kwargs:
            __a = kwargs['''output_bboxes_mask''']
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , *__SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : Any=None , **__SCREAMING_SNAKE_CASE : str):
        '''simple docstring'''
        return super().__call__(__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , num_workers=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)

    def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Any=64 , __SCREAMING_SNAKE_CASE : int = 0 , __SCREAMING_SNAKE_CASE : float = 512 / 1_500 , __SCREAMING_SNAKE_CASE : Optional[int] = 32 , __SCREAMING_SNAKE_CASE : Optional[int] = 1 , ):
        '''simple docstring'''
        __a = load_image(__SCREAMING_SNAKE_CASE)
        __a = self.image_processor.size['''longest_edge''']
        __a , __a , __a , __a = self.image_processor.generate_crop_boxes(
            __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
        __a = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''')
        with self.device_placement():
            if self.framework == "pt":
                __a = self.get_inference_context()
                with inference_context():
                    __a = self._ensure_tensor_on_device(__SCREAMING_SNAKE_CASE , device=self.device)
                    __a = self.model.get_image_embeddings(model_inputs.pop('''pixel_values'''))
                    __a = image_embeddings
        __a = grid_points.shape[1]
        __a = points_per_batch if points_per_batch is not None else n_points
        if points_per_batch <= 0:
            raise ValueError(
                '''Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. '''
                '''To return all points at once, set points_per_batch to None''')
        for i in range(0 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
            __a = grid_points[:, i : i + points_per_batch, :, :]
            __a = input_labels[:, i : i + points_per_batch]
            __a = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int=0.88 , __SCREAMING_SNAKE_CASE : List[Any]=0.95 , __SCREAMING_SNAKE_CASE : List[Any]=0 , __SCREAMING_SNAKE_CASE : int=1 , ):
        '''simple docstring'''
        __a = model_inputs.pop('''input_boxes''')
        __a = model_inputs.pop('''is_last''')
        __a = model_inputs.pop('''original_sizes''').tolist()
        __a = model_inputs.pop('''reshaped_input_sizes''').tolist()
        __a = self.model(**__SCREAMING_SNAKE_CASE)
        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        __a = model_outputs['''pred_masks''']
        __a = self.image_processor.post_process_masks(
            __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , binarize=__SCREAMING_SNAKE_CASE)
        __a = model_outputs['''iou_scores''']
        __a , __a , __a = self.image_processor.filter_masks(
            masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict=False , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : int=0.7 , ):
        '''simple docstring'''
        __a = []
        __a = []
        __a = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop('''iou_scores'''))
            all_masks.extend(model_output.pop('''masks'''))
            all_boxes.append(model_output.pop('''boxes'''))
        __a = torch.cat(__SCREAMING_SNAKE_CASE)
        __a = torch.cat(__SCREAMING_SNAKE_CASE)
        __a , __a , __a , __a = self.image_processor.post_process_for_mask_generation(
            __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
        __a = defaultdict(__SCREAMING_SNAKE_CASE)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(__SCREAMING_SNAKE_CASE)
        __a = {}
        if output_rle_mask:
            __a = rle_mask
        if output_bboxes_mask:
            __a = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
49
0
"""simple docstring""" from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = "ClapFeatureExtractor" SCREAMING_SNAKE_CASE_ = ("RobertaTokenizer", "RobertaTokenizerFast") def __init__( self, lowerCAmelCase__, lowerCAmelCase__) -> List[str]: super().__init__(lowerCAmelCase__, lowerCAmelCase__) def __call__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=None, **lowerCAmelCase__) -> List[Any]: snake_case_ = kwargs.pop('sampling_rate', lowerCAmelCase__) if text is None and audios is None: raise ValueError('You have to specify either text or audios. Both cannot be none.') if text is not None: snake_case_ = self.tokenizer(lowerCAmelCase__, return_tensors=lowerCAmelCase__, **lowerCAmelCase__) if audios is not None: snake_case_ = self.feature_extractor( lowerCAmelCase__, sampling_rate=lowerCAmelCase__, return_tensors=lowerCAmelCase__, **lowerCAmelCase__) if text is not None and audios is not None: snake_case_ = audio_features.input_features return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**lowerCAmelCase__), tensor_type=lowerCAmelCase__) def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> Tuple: return self.tokenizer.batch_decode(*lowerCAmelCase__, **lowerCAmelCase__) def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> str: return self.tokenizer.decode(*lowerCAmelCase__, **lowerCAmelCase__) @property def a_ ( self) -> int: snake_case_ = self.tokenizer.model_input_names snake_case_ = self.feature_extractor.model_input_names return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
312
"""simple docstring""" import argparse import requests import torch from PIL import Image from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel def UpperCAmelCase ( UpperCAmelCase ) -> Dict: # vision encoder if "img_encoder.pos_embed" in name: snake_case_ = name.replace('img_encoder.pos_embed' , 'vision_model.embeddings.position_embeddings' ) if "img_encoder.patch_embed.proj" in name: snake_case_ = name.replace('img_encoder.patch_embed.proj' , 'vision_model.embeddings.patch_embeddings.projection' ) if "img_encoder.patch_embed.norm" in name: snake_case_ = name.replace('img_encoder.patch_embed.norm' , 'vision_model.embeddings.layernorm' ) if "img_encoder.layers" in name: snake_case_ = name.replace('img_encoder.layers' , 'vision_model.encoder.stages' ) if "blocks" in name and "res" not in name: snake_case_ = name.replace('blocks' , 'layers' ) if "attn" in name and "pre_assign" not in name: snake_case_ = name.replace('attn' , 'self_attn' ) if "proj" in name and "self_attn" in name and "text" not in name: snake_case_ = name.replace('proj' , 'out_proj' ) if "pre_assign_attn.attn.proj" in name: snake_case_ = name.replace('pre_assign_attn.attn.proj' , 'pre_assign_attn.attn.out_proj' ) if "norm1" in name: snake_case_ = name.replace('norm1' , 'layer_norm1' ) if "norm2" in name and "pre_assign" not in name: snake_case_ = name.replace('norm2' , 'layer_norm2' ) if "img_encoder.norm" in name: snake_case_ = name.replace('img_encoder.norm' , 'vision_model.layernorm' ) # text encoder if "text_encoder.token_embedding" in name: snake_case_ = name.replace('text_encoder.token_embedding' , 'text_model.embeddings.token_embedding' ) if "text_encoder.positional_embedding" in name: snake_case_ = name.replace('text_encoder.positional_embedding' , 'text_model.embeddings.position_embedding.weight' ) if "text_encoder.transformer.resblocks." in name: snake_case_ = name.replace('text_encoder.transformer.resblocks.' , 'text_model.encoder.layers.' ) if "ln_1" in name: snake_case_ = name.replace('ln_1' , 'layer_norm1' ) if "ln_2" in name: snake_case_ = name.replace('ln_2' , 'layer_norm2' ) if "c_fc" in name: snake_case_ = name.replace('c_fc' , 'fc1' ) if "c_proj" in name: snake_case_ = name.replace('c_proj' , 'fc2' ) if "text_encoder" in name: snake_case_ = name.replace('text_encoder' , 'text_model' ) if "ln_final" in name: snake_case_ = name.replace('ln_final' , 'final_layer_norm' ) # projection layers if "img_projector.linear_hidden." in name: snake_case_ = name.replace('img_projector.linear_hidden.' , 'visual_projection.' ) if "img_projector.linear_out." in name: snake_case_ = name.replace('img_projector.linear_out.' , 'visual_projection.3.' ) if "text_projector.linear_hidden" in name: snake_case_ = name.replace('text_projector.linear_hidden' , 'text_projection' ) if "text_projector.linear_out" in name: snake_case_ = name.replace('text_projector.linear_out' , 'text_projection.3' ) return name def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]: for key in orig_state_dict.copy().keys(): snake_case_ = orig_state_dict.pop(UpperCAmelCase ) if "qkv" in key: # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors snake_case_ = key.split('.' 
) snake_case_ , snake_case_ = int(key_split[2] ), int(key_split[4] ) snake_case_ = config.vision_config.hidden_size if "weight" in key: snake_case_ = val[:dim, :] snake_case_ = val[dim : dim * 2, :] snake_case_ = val[-dim:, :] else: snake_case_ = val[:dim] snake_case_ = val[dim : dim * 2] snake_case_ = val[-dim:] elif "in_proj" in key: # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors snake_case_ = key.split('.' ) snake_case_ = int(key_split[3] ) snake_case_ = config.text_config.hidden_size if "weight" in key: snake_case_ = val[:dim, :] snake_case_ = val[ dim : dim * 2, : ] snake_case_ = val[-dim:, :] else: snake_case_ = val[:dim] snake_case_ = val[dim : dim * 2] snake_case_ = val[-dim:] else: snake_case_ = rename_key(UpperCAmelCase ) # squeeze if necessary if ( "text_projection.0" in new_name or "text_projection.3" in new_name or "visual_projection.0" in new_name or "visual_projection.3" in new_name ): snake_case_ = val.squeeze_() else: snake_case_ = val return orig_state_dict def UpperCAmelCase ( ) -> Any: snake_case_ = 'http://images.cocodataset.org/val2017/000000039769.jpg' snake_case_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw ) return im @torch.no_grad() def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase="groupvit-gcc-yfcc" , UpperCAmelCase=False ) -> int: snake_case_ = GroupViTConfig() snake_case_ = GroupViTModel(UpperCAmelCase ).eval() snake_case_ = torch.load(UpperCAmelCase , map_location='cpu' )['model'] snake_case_ = convert_state_dict(UpperCAmelCase , UpperCAmelCase ) snake_case_ , snake_case_ = model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase ) assert missing_keys == ["text_model.embeddings.position_ids"] assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(UpperCAmelCase ) == 0) # verify result snake_case_ = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32' ) snake_case_ = prepare_img() snake_case_ = processor(text=['a photo of a cat', 'a photo of a dog'] , images=UpperCAmelCase , padding=UpperCAmelCase , return_tensors='pt' ) with torch.no_grad(): snake_case_ = model(**UpperCAmelCase ) if model_name == "groupvit-gcc-yfcc": snake_case_ = torch.tensor([[13.3_523, 6.3_629]] ) elif model_name == "groupvit-gcc-redcaps": snake_case_ = torch.tensor([[16.1_873, 8.6_230]] ) else: raise ValueError(f'Model name {model_name} not supported.' ) assert torch.allclose(outputs.logits_per_image , UpperCAmelCase , atol=1e-3 ) processor.save_pretrained(UpperCAmelCase ) model.save_pretrained(UpperCAmelCase ) print('Successfully saved processor and model to' , UpperCAmelCase ) if push_to_hub: print('Pushing to the hub...' ) processor.push_to_hub(UpperCAmelCase , organization='nielsr' ) model.push_to_hub(UpperCAmelCase , organization='nielsr' ) if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.''' ) parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''') parser.add_argument( '''--model_name''', default='''groupvit-gccy-fcc''', type=str, help='''Name of the model. 
Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''', ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''', ) __UpperCamelCase = parser.parse_args() convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
312
1
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json", } class __lowerCAmelCase ( lowerCamelCase_ ): '''simple docstring''' __UpperCAmelCase : int = 'git_vision_model' def __init__( self , _a=768 , _a=3_072 , _a=12 , _a=12 , _a=3 , _a=224 , _a=16 , _a="quick_gelu" , _a=1E-5 , _a=0.0 , _a=0.02 , **_a , ): super().__init__(**__SCREAMING_SNAKE_CASE ) __a = hidden_size __a = intermediate_size __a = num_hidden_layers __a = num_attention_heads __a = num_channels __a = patch_size __a = image_size __a = initializer_range __a = attention_dropout __a = layer_norm_eps __a = hidden_act @classmethod def __UpperCAmelCase ( cls , _a , **_a ): cls._set_token_in_kwargs(__SCREAMING_SNAKE_CASE ) __a = cls.get_config_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) # get the vision config dict if we are loading from GITConfig if config_dict.get('''model_type''' ) == "git": __a = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) class __lowerCAmelCase ( lowerCamelCase_ ): '''simple docstring''' __UpperCAmelCase : Tuple = 'git' def __init__( self , _a=None , _a=30_522 , _a=768 , _a=6 , _a=12 , _a=3_072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=1_024 , _a=0.02 , _a=1E-12 , _a=0 , _a="absolute" , _a=True , _a=False , _a=101 , _a=102 , _a=None , **_a , ): super().__init__(bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , pad_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) if vision_config is None: __a = {} logger.info('''vision_config is None. initializing the GitVisionConfig with default values.''' ) __a = GitVisionConfig(**__SCREAMING_SNAKE_CASE ) __a = vocab_size __a = hidden_size __a = num_hidden_layers __a = num_attention_heads __a = hidden_act __a = intermediate_size __a = hidden_dropout_prob __a = attention_probs_dropout_prob __a = max_position_embeddings __a = initializer_range __a = layer_norm_eps __a = position_embedding_type __a = use_cache __a = tie_word_embeddings __a = num_image_with_embedding __a = bos_token_id __a = eos_token_id def __UpperCAmelCase ( self ): __a = copy.deepcopy(self.__dict__ ) __a = self.vision_config.to_dict() __a = self.__class__.model_type return output
45
'''simple docstring'''

import re
import tempfile
from pathlib import Path

import pytest
import yaml

from datasets.utils.readme import ReadMe


# @pytest.fixture
# def example_yaml_structure():

_lowercase : Union[str, Any] = yaml.safe_load(
    "\\nname: \"\"\nallow_empty: false\nallow_empty_text: true\nsubsections:\n  - name: \"Dataset Card for X\"  # First-level markdown heading\n    allow_empty: false\n    allow_empty_text: true\n    subsections:\n      - name: \"Table of Contents\"\n        allow_empty: false\n        allow_empty_text: false\n        subsections: null\n      - name: \"Dataset Description\"\n        allow_empty: false\n        allow_empty_text: false\n        subsections:\n          - name: \"Dataset Summary\"\n            allow_empty: false\n            allow_empty_text: false\n            subsections: null\n          - name: \"Supported Tasks and Leaderboards\"\n            allow_empty: true\n            allow_empty_text: true\n            subsections: null\n          - name: Languages\n            allow_empty: false\n            allow_empty_text: true\n            subsections: null\n" )

_lowercase : int = {
    "name": "root",
    "text": "",
    "is_empty_text": True,
    "subsections": [
        {
            "name": "Dataset Card for My Dataset",
            "text": "",
            "is_empty_text": True,
            "subsections": [
                {"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
                {
                    "name": "Dataset Description",
                    "text": "Some text here.",
                    "is_empty_text": False,
                    "subsections": [
                        {
                            "name": "Dataset Summary",
                            "text": "Some text here.",
                            "is_empty_text": False,
                            "subsections": [],
                        },
                        {
                            "name": "Supported Tasks and Leaderboards",
                            "text": "",
                            "is_empty_text": True,
                            "subsections": [],
                        },
                        {"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
                    ],
                },
            ],
        }
    ],
}

_lowercase : Optional[Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"

_lowercase : Union[str, Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"

_lowercase : Any = {
    "name": "root",
    "text": "",
    "is_empty_text": True,
    "subsections": [
        {
            "name": "Dataset Card for My Dataset",
            "text": "",
            "is_empty_text": True,
            "subsections": [
                {"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
                {
                    "name": "Dataset Description",
                    "text": "Some text here.",
                    "is_empty_text": False,
                    "subsections": [
                        {
                            "name": "Dataset Summary",
                            "text": "Some text here.",
                            "is_empty_text": False,
                            "subsections": [
                                {
                                    "name": "Extra Ignored Subsection",
                                    "text": "",
                                    "is_empty_text": True,
                                    "subsections": [],
                                }
                            ],
                        },
                        {
                            "name": "Supported Tasks and Leaderboards",
                            "text": "",
                            "is_empty_text": True,
                            "subsections": [],
                        },
                        {"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
                    ],
                },
            ],
        }
    ],
}

_lowercase : str = "\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"

_lowercase : List[str] = (
    "The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."
)

_lowercase : Tuple = "\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"

_lowercase : Optional[Any] = (
    "The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."
)

_lowercase : Tuple = "\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"

_lowercase : Optional[int] = "The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."

_lowercase : List[Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"

_lowercase : Optional[Any] = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."

_lowercase : Optional[int] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n"

_lowercase : Union[str, Any] = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."

_lowercase : Union[str, Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n"

_lowercase : int = "The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."

_lowercase : List[Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n"

_lowercase : int = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."

_lowercase : List[str] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"

_lowercase : str = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."

_lowercase : Dict = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n"

_lowercase : List[str] = "The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."

_lowercase : str = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"

_lowercase : Union[str, Any] = "The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."

_lowercase : List[Any] = ""

_lowercase : Optional[Any] = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."

_lowercase : List[Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"

_lowercase : Optional[Any] = "The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."


@pytest.mark.parametrize(
    '''readme_md, expected_dict''' ,
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Any] ):
    """simple docstring"""
    assert ReadMe.from_string(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).to_dict() == expected_dict


@pytest.mark.parametrize(
    '''readme_md, expected_error''' ,
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict ):
    """simple docstring"""
    with pytest.raises(__SCREAMING_SNAKE_CASE , match=re.escape(expected_error.format(path='''root''' ) ) ):
        lowercase_ : Optional[int] = ReadMe.from_string(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
        readme.validate()


@pytest.mark.parametrize(
    '''readme_md, expected_error''' ,
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict ):
    """simple docstring"""
    with pytest.raises(__SCREAMING_SNAKE_CASE , match=re.escape(expected_error.format(path='''root''' ) ) ):
        ReadMe.from_string(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )


@pytest.mark.parametrize(
    '''readme_md,''' ,
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Any ):
    """simple docstring"""
    ReadMe.from_string(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , suppress_parsing_errors=__SCREAMING_SNAKE_CASE )


@pytest.mark.parametrize(
    '''readme_md, expected_dict''' ,
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : str ):
    """simple docstring"""
    with tempfile.TemporaryDirectory() as tmp_dir:
        lowercase_ : Optional[int] = Path(__SCREAMING_SNAKE_CASE ) / '''README.md'''
        with open(__SCREAMING_SNAKE_CASE , '''w+''' ) as readme_file:
            readme_file.write(__SCREAMING_SNAKE_CASE )
        lowercase_ : Any = ReadMe.from_readme(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]


@pytest.mark.parametrize(
    '''readme_md, expected_error''' ,
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any] ):
    """simple docstring"""
    with tempfile.TemporaryDirectory() as tmp_dir:
        lowercase_ : str = Path(__SCREAMING_SNAKE_CASE ) / '''README.md'''
        with open(__SCREAMING_SNAKE_CASE , '''w+''' ) as readme_file:
            readme_file.write(__SCREAMING_SNAKE_CASE )
        lowercase_ : List[str] = expected_error.format(path=__SCREAMING_SNAKE_CASE )
        with pytest.raises(__SCREAMING_SNAKE_CASE , match=re.escape(__SCREAMING_SNAKE_CASE ) ):
            lowercase_ : int = ReadMe.from_readme(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            readme.validate()


@pytest.mark.parametrize(
    '''readme_md, expected_error''' ,
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] ):
    """simple docstring"""
    with tempfile.TemporaryDirectory() as tmp_dir:
        lowercase_ : Dict = Path(__SCREAMING_SNAKE_CASE ) / '''README.md'''
        with open(__SCREAMING_SNAKE_CASE , '''w+''' ) as readme_file:
            readme_file.write(__SCREAMING_SNAKE_CASE )
        lowercase_ : Tuple = expected_error.format(path=__SCREAMING_SNAKE_CASE )
        with pytest.raises(__SCREAMING_SNAKE_CASE , match=re.escape(__SCREAMING_SNAKE_CASE ) ):
            ReadMe.from_readme(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )


@pytest.mark.parametrize(
    '''readme_md,''' ,
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Tuple ):
    """simple docstring"""
    with tempfile.TemporaryDirectory() as tmp_dir:
        lowercase_ : Optional[int] = Path(__SCREAMING_SNAKE_CASE ) / '''README.md'''
        with open(__SCREAMING_SNAKE_CASE , '''w+''' ) as readme_file:
            readme_file.write(__SCREAMING_SNAKE_CASE )
        ReadMe.from_readme(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , suppress_parsing_errors=__SCREAMING_SNAKE_CASE )
93
0
import argparse
import os
import platform

import numpy as np
import psutil
import torch

from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file

from ..utils import is_npu_available, is_xpu_available


def env_command_parser(subparsers=None):
    # The obfuscated row referenced `subparsers`/`args` while naming the
    # parameters `__A`, and tested `os.path.isfile(_UpperCAmelCase)` on an
    # undefined name; restored to the working accelerate `env` command.
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config
    return info


def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
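The file above registers itself as the `env` subcommand of the accelerate CLI. A minimal sketch of exercising it end to end, assuming the accelerate package is pip-installed and on PATH (the module itself uses relative imports, so it is not runnable standalone):

# Hedged sketch: shells out to the installed `accelerate` CLI, which routes
# the `env` subcommand to the parser/command pair defined above.
import subprocess

result = subprocess.run(["accelerate", "env"], capture_output=True, text=True, check=False)
print(result.stdout or result.stderr)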
365
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}


class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    # The obfuscated row named every parameter `lowercase` (a duplicate-argument
    # SyntaxError) and reused `token_ids_a` for both sequences; restored names.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]",
                 sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
                 tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case,
                         unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token,
                         mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars,
                         strip_accents=strip_accents, **kwargs)

        # Keep the backend normalizer in sync with the requested options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
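A short usage sketch for the tokenizer above, assuming the upstream transformers package (where this class lives as ConvBertTokenizerFast) rather than this standalone file; it downloads the vocabulary from the Hub on first run:

# Hedged sketch: requires `pip install transformers` and network access.
from transformers import ConvBertTokenizerFast

tok = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
enc = tok("ConvBERT mixes attention and convolution.")
print(enc["input_ids"])       # [CLS] ... [SEP] with WordPiece ids in between
print(enc["token_type_ids"])  # all zeros for a single-sequence input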
243
0
"""simple docstring""" import argparse import torch from transformers import ( EncodecConfig, EncodecFeatureExtractor, EncodecModel, logging, ) # checkpoints downloaded from: # https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th # https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin # https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th logging.set_verbosity_info() __A : Union[str, Any] = logging.get_logger('''transformers.models.encodec''') __A : Dict = { '''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''', '''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''', '''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''', '''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''', } __A : str = { '''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''', '''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''', '''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''', '''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''', '''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''', '''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''', '''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''', '''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''', '''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''', '''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''', '''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''', '''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''', '''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''', '''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''', '''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''', '''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''', '''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''', '''encoder.model.13.lstm''': '''encoder.layers.13.lstm''', '''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''', } __A : Dict = { '''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''', '''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''', '''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''', '''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''', '''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''', '''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''', '''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''', '''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''', '''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''', '''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''', '''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''', '''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''', '''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''', '''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''', '''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''', '''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''', '''encoder.model.12.conv.norm''': 
'''encoder.layers.12.norm''', '''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''', } __A : int = { '''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''', '''decoder.model.1.lstm''': '''decoder.layers.1.lstm''', '''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''', '''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''', '''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''', '''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''', '''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''', '''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''', '''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''', '''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''', '''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''', '''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''', '''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''', '''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''', '''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''', '''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''', '''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''', '''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''', '''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''', } __A : Optional[Any] = { '''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''', '''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''', '''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''', '''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''', '''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''', '''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''', '''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''', '''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''', '''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''', '''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''', '''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''', '''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''', '''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''', '''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''', '''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''', '''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''', '''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''', '''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''', } __A : Tuple = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_DECODER, } __A : Optional[int] = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_ENCODER_48K, **MAPPING_DECODER, **MAPPING_DECODER_48K, } __A : Optional[int] = [] __A : Dict = [] def lowercase ( __snake_case : Union[str, Any] , __snake_case : Optional[int] , __snake_case : List[Any] , __snake_case : str , __snake_case : Tuple ): for attribute in key.split('''.''' ): lowercase_ : Union[str, Any] = getattr(__snake_case , __snake_case ) if weight_type is not None: lowercase_ : Any = getattr(__snake_case , __snake_case ).shape else: lowercase_ : Union[str, Any] = hf_pointer.shape if hf_shape != 
value.shape: raise ValueError( F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": lowercase_ : Tuple = value elif weight_type == "weight_g": lowercase_ : Optional[Any] = value elif weight_type == "weight_v": lowercase_ : Tuple = value elif weight_type == "bias": lowercase_ : Optional[int] = value elif weight_type == "running_mean": lowercase_ : Any = value elif weight_type == "running_var": lowercase_ : Optional[int] = value elif weight_type == "num_batches_tracked": lowercase_ : Dict = value elif weight_type == "weight_ih_l0": lowercase_ : Optional[int] = value elif weight_type == "weight_hh_l0": lowercase_ : Tuple = value elif weight_type == "bias_ih_l0": lowercase_ : Optional[int] = value elif weight_type == "bias_hh_l0": lowercase_ : Any = value elif weight_type == "weight_ih_l1": lowercase_ : Optional[int] = value elif weight_type == "weight_hh_l1": lowercase_ : str = value elif weight_type == "bias_ih_l1": lowercase_ : Optional[int] = value elif weight_type == "bias_hh_l1": lowercase_ : str = value else: lowercase_ : List[str] = value logger.info(F'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' ) def lowercase ( __snake_case : int , __snake_case : Dict ): for key in ignore_keys: if key.endswith('''.*''' ): if name.startswith(key[:-1] ): return True elif ".*." in key: lowercase_ , lowercase_ : Union[str, Any] = key.split('''.*.''' ) if prefix in name and suffix in name: return True elif key in name: return True return False def lowercase ( __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Tuple ): lowercase_ : Dict = [] if model_name == "encodec_24khz" or "encodec_32khz": lowercase_ : Dict = MAPPING_24K elif model_name == "encodec_48khz": lowercase_ : Union[str, Any] = MAPPING_48K else: raise ValueError(F'''Unsupported model: {model_name}''' ) for name, value in orig_dict.items(): if should_ignore(__snake_case , __snake_case ): logger.info(F'''{name} was ignored''' ) continue lowercase_ : List[str] = False for key, mapped_key in MAPPING.items(): if "*" in key: lowercase_ , lowercase_ : Dict = key.split('''.*.''' ) if prefix in name and suffix in name: lowercase_ : List[str] = suffix if key in name: # HACK otherwise .embed gets initialized with .embed_avg too if key.endswith('''embed''' ) and name.endswith('''embed_avg''' ): continue lowercase_ : Union[str, Any] = True if "*" in mapped_key: lowercase_ : str = name.split(__snake_case )[0].split('''.''' )[-2] lowercase_ : Any = mapped_key.replace('''*''' , __snake_case ) if "weight_g" in name: lowercase_ : Any = '''weight_g''' elif "weight_v" in name: lowercase_ : Optional[int] = '''weight_v''' elif "weight_ih_l0" in name: lowercase_ : int = '''weight_ih_l0''' elif "weight_hh_l0" in name: lowercase_ : int = '''weight_hh_l0''' elif "bias_ih_l0" in name: lowercase_ : Tuple = '''bias_ih_l0''' elif "bias_hh_l0" in name: lowercase_ : Union[str, Any] = '''bias_hh_l0''' elif "weight_ih_l1" in name: lowercase_ : List[str] = '''weight_ih_l1''' elif "weight_hh_l1" in name: lowercase_ : int = '''weight_hh_l1''' elif "bias_ih_l1" in name: lowercase_ : Tuple = '''bias_ih_l1''' elif "bias_hh_l1" in name: lowercase_ : Optional[Any] = '''bias_hh_l1''' elif "bias" in name: lowercase_ : List[Any] = '''bias''' elif "weight" in name: lowercase_ : Dict = '''weight''' elif "running_mean" in name: lowercase_ : List[str] = '''running_mean''' elif "running_var" in 
name: lowercase_ : Tuple = '''running_var''' elif "num_batches_tracked" in name: lowercase_ : str = '''num_batches_tracked''' else: lowercase_ : int = None set_recursively(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) continue if not is_used: unused_weights.append(__snake_case ) logger.warning(F'''Unused weights: {unused_weights}''' ) @torch.no_grad() def lowercase ( __snake_case : List[str] , __snake_case : Dict , __snake_case : List[Any] , __snake_case : Union[str, Any]=None , __snake_case : Optional[int]=None , ): if config_path is not None: lowercase_ : List[Any] = EncodecConfig.from_pretrained(__snake_case ) else: lowercase_ : Optional[Any] = EncodecConfig() if model_name == "encodec_24khz": pass # config is already correct elif model_name == "encodec_32khz": lowercase_ : Any = [8, 5, 4, 4] lowercase_ : List[str] = [2.2] lowercase_ : Optional[Any] = 6_4 lowercase_ : int = 3_2_0_0_0 lowercase_ : Union[str, Any] = 2_0_4_8 lowercase_ : Union[str, Any] = False lowercase_ : int = False lowercase_ : str = False elif model_name == "encodec_48khz": lowercase_ : str = [8, 5, 4, 2] lowercase_ : str = [3.0, 6.0, 12.0, 24.0] lowercase_ : str = 4_8_0_0_0 lowercase_ : Optional[Any] = 2 lowercase_ : List[str] = False lowercase_ : int = '''time_group_norm''' lowercase_ : List[str] = True lowercase_ : Dict = 1.0 lowercase_ : Union[str, Any] = 0.01 else: raise ValueError(F'''Unknown model name: {model_name}''' ) lowercase_ : Optional[Any] = EncodecModel(__snake_case ) lowercase_ : Dict = EncodecFeatureExtractor( feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , ) feature_extractor.save_pretrained(__snake_case ) lowercase_ : str = torch.load(__snake_case ) if "best_state" in original_checkpoint: # we might have a training state saved, in which case discard the yaml results and just retain the weights lowercase_ : List[str] = original_checkpoint['''best_state'''] recursively_load_weights(__snake_case , __snake_case , __snake_case ) model.save_pretrained(__snake_case ) if repo_id: print('''Pushing to the hub...''' ) feature_extractor.push_to_hub(__snake_case ) model.push_to_hub(__snake_case ) if __name__ == "__main__": __A : Tuple = argparse.ArgumentParser() parser.add_argument( '''--model''', default='''encodec_24khz''', type=str, help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''', ) parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) __A : Optional[int] = parser.parse_args() convert_checkpoint( args.model, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
33
# Usage: # ./gen-card-facebook-wmt19.py import os from pathlib import Path def lowerCAmelCase_ ( A_ ,A_ ,A_): UpperCamelCase__: List[Any] = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, oder?", } # BLUE scores as follows: # "pair": [fairseq, transformers] UpperCamelCase__: str = { "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"], "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"], "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"], "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"], } UpperCamelCase__: str = F"{src_lang}-{tgt_lang}" UpperCamelCase__: Optional[Any] = F"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. 
For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n" os.makedirs(A_ ,exist_ok=A_) UpperCamelCase__: Union[str, Any] = os.path.join(A_ ,"README.md") print(F"Generating {path}") with open(A_ ,"w" ,encoding="utf-8") as f: f.write(A_) # make sure we are under the root of the project A__: Optional[Any] = Path(__file__).resolve().parent.parent.parent A__: Optional[int] = repo_dir / '''model_cards''' for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: A__ , A__ , A__: Optional[Any] = model_name.split('''-''') A__: List[str] = model_cards_dir / '''facebook''' / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
149
0
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json", } class lowercase_ ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" lowerCamelCase_ = '''instructblip_vision_model''' def __init__( self : Optional[int] , __lowerCamelCase : Tuple=1_4_0_8 , __lowerCamelCase : Optional[int]=6_1_4_4 , __lowerCamelCase : str=3_9 , __lowerCamelCase : List[Any]=1_6 , __lowerCamelCase : Tuple=2_2_4 , __lowerCamelCase : List[str]=1_4 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : str=1e-6 , __lowerCamelCase : Any=0.0 , __lowerCamelCase : List[Any]=1e-10 , __lowerCamelCase : Any=True , **__lowerCamelCase : List[str] , ): """simple docstring""" super().__init__(**A__ ) _SCREAMING_SNAKE_CASE = hidden_size _SCREAMING_SNAKE_CASE = intermediate_size _SCREAMING_SNAKE_CASE = num_hidden_layers _SCREAMING_SNAKE_CASE = num_attention_heads _SCREAMING_SNAKE_CASE = patch_size _SCREAMING_SNAKE_CASE = image_size _SCREAMING_SNAKE_CASE = initializer_range _SCREAMING_SNAKE_CASE = attention_dropout _SCREAMING_SNAKE_CASE = layer_norm_eps _SCREAMING_SNAKE_CASE = hidden_act _SCREAMING_SNAKE_CASE = qkv_bias @classmethod def lowerCAmelCase_ ( cls : str , __lowerCamelCase : Tuple , **__lowerCamelCase : List[str] ): """simple docstring""" cls._set_token_in_kwargs(A__ ) _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = cls.get_config_dict(A__ , **A__ ) # get the vision config dict if we are loading from InstructBlipConfig if config_dict.get("model_type" ) == "instructblip": _SCREAMING_SNAKE_CASE = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ F"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(A__ , **A__ ) class lowercase_ ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" lowerCamelCase_ = '''instructblip_qformer''' def __init__( self : str , __lowerCamelCase : Optional[Any]=3_0_5_2_2 , __lowerCamelCase : List[Any]=7_6_8 , __lowerCamelCase : Optional[Any]=1_2 , __lowerCamelCase : Tuple=1_2 , __lowerCamelCase : Tuple=3_0_7_2 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : List[Any]=0.1 , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : int=5_1_2 , __lowerCamelCase : Union[str, Any]=0.0_2 , __lowerCamelCase : str=1e-12 , __lowerCamelCase : Dict=0 , __lowerCamelCase : int="absolute" , __lowerCamelCase : Tuple=2 , __lowerCamelCase : Union[str, Any]=1_4_0_8 , **__lowerCamelCase : Any , ): """simple docstring""" super().__init__(pad_token_id=A__ , **A__ ) _SCREAMING_SNAKE_CASE = vocab_size _SCREAMING_SNAKE_CASE = hidden_size _SCREAMING_SNAKE_CASE = num_hidden_layers _SCREAMING_SNAKE_CASE = num_attention_heads _SCREAMING_SNAKE_CASE = hidden_act _SCREAMING_SNAKE_CASE = intermediate_size _SCREAMING_SNAKE_CASE = hidden_dropout_prob _SCREAMING_SNAKE_CASE = attention_probs_dropout_prob _SCREAMING_SNAKE_CASE = max_position_embeddings _SCREAMING_SNAKE_CASE = initializer_range _SCREAMING_SNAKE_CASE = layer_norm_eps _SCREAMING_SNAKE_CASE = position_embedding_type _SCREAMING_SNAKE_CASE = cross_attention_frequency _SCREAMING_SNAKE_CASE = encoder_hidden_size @classmethod def lowerCAmelCase_ ( cls : Dict , __lowerCamelCase : Union[str, Any] , **__lowerCamelCase : Union[str, Any] ): """simple docstring""" cls._set_token_in_kwargs(A__ ) _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = cls.get_config_dict(A__ , **A__ ) # get the qformer config dict if we are loading from InstructBlipConfig if config_dict.get("model_type" ) == "instructblip": _SCREAMING_SNAKE_CASE = config_dict["qformer_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(A__ , **A__ ) class lowercase_ ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" lowerCamelCase_ = '''instructblip''' lowerCamelCase_ = True def __init__( self : int , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : List[str]=None , __lowerCamelCase : int=3_2 , **__lowerCamelCase : Union[str, Any] ): """simple docstring""" super().__init__(**A__ ) if vision_config is None: _SCREAMING_SNAKE_CASE = {} logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values." ) if qformer_config is None: _SCREAMING_SNAKE_CASE = {} logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values." ) if text_config is None: _SCREAMING_SNAKE_CASE = {} logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." 
) _SCREAMING_SNAKE_CASE = InstructBlipVisionConfig(**A__ ) _SCREAMING_SNAKE_CASE = InstructBlipQFormerConfig(**A__ ) _SCREAMING_SNAKE_CASE = text_config["model_type"] if "model_type" in text_config else "opt" _SCREAMING_SNAKE_CASE = CONFIG_MAPPING[text_model_type](**A__ ) _SCREAMING_SNAKE_CASE = self.text_config.tie_word_embeddings _SCREAMING_SNAKE_CASE = self.text_config.is_encoder_decoder _SCREAMING_SNAKE_CASE = num_query_tokens _SCREAMING_SNAKE_CASE = self.vision_config.hidden_size _SCREAMING_SNAKE_CASE = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES _SCREAMING_SNAKE_CASE = 1.0 _SCREAMING_SNAKE_CASE = 0.0_2 @classmethod def lowerCAmelCase_ ( cls : Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , **__lowerCamelCase : Any , ): """simple docstring""" return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **A__ , ) def lowerCAmelCase_ ( self : Dict ): """simple docstring""" _SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ ) _SCREAMING_SNAKE_CASE = self.vision_config.to_dict() _SCREAMING_SNAKE_CASE = self.qformer_config.to_dict() _SCREAMING_SNAKE_CASE = self.text_config.to_dict() _SCREAMING_SNAKE_CASE = self.__class__.model_type return output
359
from collections.abc import Sequence


def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """
    Return the maximum possible sum amongst all non-empty subsequences
    (elements need not be contiguous).

    Raises:
      ValueError: when nums is empty.

    >>> max_subsequence_sum([1, 2, 3, 4, -2])
    10
    >>> max_subsequence_sum([-2, -3, -1, -4, -6])
    -1
    """
    # The obfuscated row referenced `nums`, `ans`, and `num` while naming the
    # parameter `__A` and every assignment `_SCREAMING_SNAKE_CASE`; restored.
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        # Either keep the current best, extend it with num, or restart at num.
        ans = max(ans, ans + num, num)

    return ans


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
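A non-interactive check of the function above (test values are my own, not from the dataset); since a subsequence may skip elements, the answer is the sum of the positive entries whenever at least one exists:

# Assumes max_subsequence_sum from the row above is in scope.
assert max_subsequence_sum([4, -1, 7]) == 11     # take 4 and 7, skip the -1
assert max_subsequence_sum([-5, -2, -9]) == -2   # all negative: best single element
print("ok")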
111
0
import random


def partition(a: list, left_index: int, right_index: int) -> int:
    # The obfuscated row defined all three functions as `lowerCamelCase` while
    # calling `partition`, `quick_sort_random`, and `main`; restored names,
    # and restored the in-place swaps that were assigned to throwaway names.
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        quick_sort_random(a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point


def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main()
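A deterministic driver for the functions above, seeding random so the pivot choices, and hence the recursion, are reproducible (the input list is mine, not from the dataset):

# Assumes partition / quick_sort_random from the row above are in scope.
import random

random.seed(0)  # make the random pivot choices reproducible
data = [9, 1, 8, 2, 7, 3, 6, 4, 5]
quick_sort_random(data, 0, len(data))  # sorts in place over [0, len(data))
assert data == sorted([9, 1, 8, 2, 7, 3, 6, 4, 5])
print(data)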
43
import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch a_ = logging.get_logger(__name__) class __lowerCAmelCase : def __init__( self , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase=None , __UpperCAmelCase=None ): '''simple docstring''' if not conversation_id: __lowerCamelCase = uuid.uuida() if past_user_inputs is None: __lowerCamelCase = [] if generated_responses is None: __lowerCamelCase = [] __lowerCamelCase = conversation_id __lowerCamelCase = past_user_inputs __lowerCamelCase = generated_responses __lowerCamelCase = text def __eq__( self , __UpperCAmelCase ): '''simple docstring''' if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False ): '''simple docstring''' if self.new_user_input: if overwrite: logger.warning( F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """ F"""with: \"{text}\".""" ) __lowerCamelCase = text else: logger.warning( F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """ F"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" ) else: __lowerCamelCase = text def lowerCamelCase ( self ): '''simple docstring''' if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) __lowerCamelCase = None def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' self.generated_responses.append(__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self ): '''simple docstring''' __lowerCamelCase = F"""Conversation id: {self.uuid} \n""" for is_user, text in self.iter_texts(): __lowerCamelCase = '''user''' if is_user else '''bot''' output += F"""{name} >> {text} \n""" return output @add_end_docstrings( lowerCAmelCase__ , r""" min_length_for_response (`int`, *optional*, defaults to 32): The minimum length (in number of tokens) for a response. minimum_tokens (`int`, *optional*, defaults to 10): The minimum length of tokens to leave for a response. 
""" , ) class __lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' super().__init__(*__UpperCAmelCase , **__UpperCAmelCase ) if self.tokenizer.pad_token_id is None: __lowerCamelCase = self.tokenizer.eos_token def lowerCamelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = {} __lowerCamelCase = {} __lowerCamelCase = {} if min_length_for_response is not None: __lowerCamelCase = min_length_for_response if minimum_tokens is not None: __lowerCamelCase = minimum_tokens if "max_length" in generate_kwargs: __lowerCamelCase = generate_kwargs['''max_length'''] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: __lowerCamelCase = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(__UpperCAmelCase ) return preprocess_params, forward_params, postprocess_params def __call__( self , __UpperCAmelCase , __UpperCAmelCase=0 , **__UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = super().__call__(__UpperCAmelCase , num_workers=__UpperCAmelCase , **__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) == 1: return outputs[0] return outputs def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=32 ): '''simple docstring''' if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise ValueError('''ConversationalPipeline, expects Conversation as inputs''' ) if conversation.new_user_input is None: raise ValueError( F"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """ '''Add user inputs with the conversation\'s `add_user_input` method''' ) if hasattr(self.tokenizer , '''_build_conversation_input_ids''' ): __lowerCamelCase = self.tokenizer._build_conversation_input_ids(__UpperCAmelCase ) else: # If the tokenizer cannot handle conversations, we default to only the old version __lowerCamelCase = self._legacy_parse_and_tokenize(__UpperCAmelCase ) if self.framework == "pt": __lowerCamelCase = torch.LongTensor([input_ids] ) elif self.framework == "tf": __lowerCamelCase = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=10 , **__UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = generate_kwargs.get('''max_length''' , self.model.config.max_length ) __lowerCamelCase = model_inputs['''input_ids'''].shape[1] if max_length - minimum_tokens < n: logger.warning(F"""Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})""" ) __lowerCamelCase = max_length - minimum_tokens __lowerCamelCase = model_inputs['''input_ids'''][:, -trim:] if "attention_mask" in model_inputs: __lowerCamelCase = model_inputs['''attention_mask'''][:, -trim:] __lowerCamelCase = model_inputs.pop('''conversation''' ) __lowerCamelCase = max_length __lowerCamelCase = self.model.generate(**__UpperCAmelCase , **__UpperCAmelCase ) if self.model.config.is_encoder_decoder: __lowerCamelCase = 1 else: __lowerCamelCase = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=True ): '''simple docstring''' __lowerCamelCase = model_outputs['''output_ids'''] __lowerCamelCase = self.tokenizer.decode( output_ids[0] , skip_special_tokens=__UpperCAmelCase , 
clean_up_tokenization_spaces=__UpperCAmelCase , ) __lowerCamelCase = model_outputs['''conversation'''] conversation.mark_processed() conversation.append_response(__UpperCAmelCase ) return conversation def lowerCamelCase ( self , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.tokenizer.eos_token_id __lowerCamelCase = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) + [eos_token_id] ) else: input_ids.extend(self.tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) ) if len(__UpperCAmelCase ) > self.tokenizer.model_max_length: __lowerCamelCase = input_ids[-self.tokenizer.model_max_length :] return input_ids
330
0
def solution(n: int = 1000) -> int:
    """
    Return the product a*b*c of the Pythagorean triplet (a, b, c)
    satisfying a + b + c == n.
    """
    # The obfuscated row named this `UpperCamelCase` while the __main__ block
    # called `solution()`; restored the name and the working variables.
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product


if __name__ == "__main__":
    print(f"{solution() = }")
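For n = 1000 the loop above lands on the classic Euler triplet; a standalone check that verifies the constraints rather than trusting the stated values:

a, b, c = 200, 375, 425          # the triplet solution(1000) discovers
assert a + b + c == 1000         # perimeter constraint
assert a * a + b * b == c * c    # Pythagorean constraint: 40000 + 140625 == 180625
print(a * b * c)                 # 31875000, the value solution() returns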
146
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
    "YituTech/conv-bert-medium-small": (
        "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
    ),
    "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}


class ConvBertConfig(PretrainedConfig):
    # The obfuscated row named both classes `A` (the second shadowing the
    # first) and every __init__ parameter `_UpperCAmelCase`, which is a
    # duplicate-argument SyntaxError; restored names and values.
    model_type = "convbert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
                 initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2,
                 embedding_size=768, head_ratio=2, conv_kernel_size=9, num_groups=1, classifier_dropout=None,
                 **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
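A small sketch of how the configuration class above is typically used, assuming the upstream transformers package (where it is importable as ConvBertConfig) rather than this relative-import file:

# Hedged sketch: requires `pip install transformers`.
from transformers import ConvBertConfig

config = ConvBertConfig(num_hidden_layers=4, conv_kernel_size=9)
print(config.model_type)  # "convbert"
print(config.head_ratio)  # 2: the default split between self-attention and conv heads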
146
1
"""simple docstring""" from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf _lowercase : Optional[Any] = logging.get_logger(__name__) @dataclass class _UpperCAmelCase ( _lowerCAmelCase ): a__ : int = [ "no_inference", "no_cuda", "no_tpu", "no_speed", "no_memory", "no_env_print", "no_multi_process", ] def __init__( self : Optional[Any] , **_lowercase : Dict ): for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: __UpperCAmelCase = deprecated_arg[3:] __UpperCAmelCase = not kwargs.pop(_lowercase ) logger.warning( F'''{deprecated_arg} is depreciated. Please use --no-{positive_arg} or''' F''' {positive_arg}={kwargs[positive_arg]}''' ) __UpperCAmelCase = kwargs.pop('''tpu_name''' , self.tpu_name ) __UpperCAmelCase = kwargs.pop('''device_idx''' , self.device_idx ) __UpperCAmelCase = kwargs.pop('''eager_mode''' , self.eager_mode ) __UpperCAmelCase = kwargs.pop('''use_xla''' , self.use_xla ) super().__init__(**_lowercase ) a__ : str = field( default=_lowerCAmelCase , metadata={"help": "Name of TPU"} , ) a__ : int = field( default=0 , metadata={"help": "CPU / GPU device index. Defaults to 0."} , ) a__ : bool = field(default=_lowerCAmelCase , metadata={"help": "Benchmark models in eager model."} ) a__ : bool = field( default=_lowerCAmelCase , metadata={ "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`." } , ) @cached_property def a ( self : List[str] ): requires_backends(self , ['''tf'''] ) __UpperCAmelCase = None if self.tpu: try: if self.tpu_name: __UpperCAmelCase = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: __UpperCAmelCase = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: __UpperCAmelCase = None return tpu @cached_property def a ( self : Optional[int] ): requires_backends(self , ['''tf'''] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) __UpperCAmelCase = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] , '''GPU''' ) __UpperCAmelCase = tf.distribute.OneDeviceStrategy(device=F'''/gpu:{self.device_idx}''' ) else: tf.config.set_visible_devices([] , '''GPU''' ) # disable GPU __UpperCAmelCase = tf.distribute.OneDeviceStrategy(device=F'''/cpu:{self.device_idx}''' ) return strategy @property def a ( self : Tuple ): requires_backends(self , ['''tf'''] ) return self._setup_tpu is not None @property def a ( self : List[str] ): requires_backends(self , ['''tf'''] ) return self._setup_strategy @property def a ( self : Union[str, Any] ): requires_backends(self , ['''tf'''] ) return tf.config.list_physical_devices('''GPU''' ) @property def a ( self : str ): requires_backends(self , ['''tf'''] ) if self.cuda: return len(self.gpu_list ) return 0 @property def a ( self : Union[str, Any] ): return self.n_gpu > 0
332
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class _UpperCAmelCase ( unittest.TestCase ): def a ( self : Dict , _lowercase : Union[str, Any] ): for model_result in results.values(): for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ): __UpperCAmelCase = model_result['''result'''][batch_size][sequence_length] self.assertIsNotNone(_lowercase ) def a ( self : str ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a ( self : List[str] ): __UpperCAmelCase = '''sgugger/tiny-distilbert-classification''' __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , only_pretrain_model=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a ( self : str ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , torchscript=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' ) def a ( self : Optional[Any] ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , fpaa=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a ( self : int ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' __UpperCAmelCase = AutoConfig.from_pretrained(_lowercase ) # set architectures equal to `None` __UpperCAmelCase = None __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase , configs=[config] ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a ( self : Tuple ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , 
multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) @unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' ) def a ( self : Optional[Any] ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_lowercase , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def a ( self : Any ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' __UpperCAmelCase = AutoConfig.from_pretrained(_lowercase ) __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase , configs=[config] ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a ( self : str ): __UpperCAmelCase = '''sshleifer/tinier_bart''' __UpperCAmelCase = AutoConfig.from_pretrained(_lowercase ) __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase , configs=[config] ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def a ( self : Union[str, Any] ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' __UpperCAmelCase = AutoConfig.from_pretrained(_lowercase ) __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase , configs=[config] ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def a ( self : int ): __UpperCAmelCase = '''sshleifer/tinier_bart''' __UpperCAmelCase = AutoConfig.from_pretrained(_lowercase ) __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase , configs=[config] ) __UpperCAmelCase = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def a ( self : Optional[Any] ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' with tempfile.TemporaryDirectory() as tmp_dir: __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , save_to_csv=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_lowercase , '''inf_time.csv''' ) , train_memory_csv_file=os.path.join(_lowercase , '''train_mem.csv''' ) , inference_memory_csv_file=os.path.join(_lowercase , '''inf_mem.csv''' ) , train_time_csv_file=os.path.join(_lowercase , 
'''train_time.csv''' ) , env_info_csv_file=os.path.join(_lowercase , '''env.csv''' ) , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase ) benchmark.run() self.assertTrue(Path(os.path.join(_lowercase , '''inf_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowercase , '''train_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowercase , '''inf_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowercase , '''train_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_lowercase , '''env.csv''' ) ).exists() ) def a ( self : List[Any] ): __UpperCAmelCase = '''sshleifer/tiny-gpt2''' def _check_summary_is_not_empty(_lowercase : str ): self.assertTrue(hasattr(_lowercase , '''sequential''' ) ) self.assertTrue(hasattr(_lowercase , '''cumulative''' ) ) self.assertTrue(hasattr(_lowercase , '''current''' ) ) self.assertTrue(hasattr(_lowercase , '''total''' ) ) with tempfile.TemporaryDirectory() as tmp_dir: __UpperCAmelCase = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_lowercase , '''log.txt''' ) , log_print=_lowercase , trace_memory_line_by_line=_lowercase , multi_process=_lowercase , ) __UpperCAmelCase = PyTorchBenchmark(_lowercase ) __UpperCAmelCase = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) _check_summary_is_not_empty(result.train_summary ) self.assertTrue(Path(os.path.join(_lowercase , '''log.txt''' ) ).exists() )
332
1
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    # The obfuscated row defined all four functions as `__UpperCamelCase`
    # while calling `encrypt_message` etc.; restored the Vigenere cipher names.
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)


if __name__ == "__main__":
    main()
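A non-interactive round trip through the functions above (message and key are my own, not from the dataset); non-letters pass through untouched and letter case is preserved, so decryption exactly inverts encryption:

# Assumes encrypt_message / decrypt_message from the row above are in scope.
message = "Attack at dawn!"
key = "LEMON"
ciphertext = encrypt_message(key, message)
assert decrypt_message(key, ciphertext) == message  # the cipher is invertible
print(ciphertext)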
355
from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal _A = logging.get_logger(__name__) _A = TypeVar('''DatasetType''', Dataset, IterableDataset) def __UpperCamelCase ( _A , _A = None , _A = None , _A = None , _A = None , _A = "first_exhausted" , ): from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError('''Unable to interleave an empty list of datasets.''' ) for i, dataset in enumerate(_A ): if not isinstance(_A , (Dataset, IterableDataset) ): if isinstance(_A , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} " '''is an empty dataset dictionary.''' ) raise ValueError( f"Dataset at position {i} has at least one split: {list(_A )}\n" f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(_A ) )}']" ) raise ValueError( f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_A ).__name__}." ) if i == 0: lowerCAmelCase_ , lowerCAmelCase_ = ( (Dataset, IterableDataset) if isinstance(_A , _A ) else (IterableDataset, Dataset) ) elif not isinstance(_A , _A ): raise ValueError( f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." ) if dataset_type is Dataset: return _interleave_map_style_datasets( _A , _A , _A , info=_A , split=_A , stopping_strategy=_A ) else: return _interleave_iterable_datasets( _A , _A , _A , info=_A , split=_A , stopping_strategy=_A ) def __UpperCamelCase ( _A , _A = None , _A = None , _A = 0 , ): if not dsets: raise ValueError('''Unable to concatenate an empty list of datasets.''' ) for i, dataset in enumerate(_A ): if not isinstance(_A , (Dataset, IterableDataset) ): if isinstance(_A , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} " '''is an empty dataset dictionary.''' ) raise ValueError( f"Dataset at position {i} has at least one split: {list(_A )}\n" f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(_A ) )}']" ) raise ValueError( f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_A ).__name__}." ) if i == 0: lowerCAmelCase_ , lowerCAmelCase_ = ( (Dataset, IterableDataset) if isinstance(_A , _A ) else (IterableDataset, Dataset) ) elif not isinstance(_A , _A ): raise ValueError( f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." 
) if dataset_type is Dataset: return _concatenate_map_style_datasets(_A , info=_A , split=_A , axis=_A ) else: return _concatenate_iterable_datasets(_A , info=_A , split=_A , axis=_A )
167
0
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import VivitImageProcessor class lowerCamelCase_ (unittest.TestCase ): '''simple docstring''' def __init__( self : int , A : Dict , A : Optional[int]=7 , A : Tuple=3 , A : Optional[Any]=10 , A : int=18 , A : Dict=30 , A : List[str]=400 , A : int=True , A : Optional[Any]=None , A : Optional[Any]=True , A : List[Any]=[0.5, 0.5, 0.5] , A : List[str]=[0.5, 0.5, 0.5] , A : Optional[int]=None , ): _UpperCAmelCase : Dict = size if size is not None else {"shortest_edge": 18} _UpperCAmelCase : Optional[Any] = crop_size if crop_size is not None else {"height": 18, "width": 18} _UpperCAmelCase : Tuple = parent _UpperCAmelCase : Any = batch_size _UpperCAmelCase : Optional[int] = num_channels _UpperCAmelCase : Optional[Any] = num_frames _UpperCAmelCase : Any = image_size _UpperCAmelCase : Dict = min_resolution _UpperCAmelCase : Any = max_resolution _UpperCAmelCase : Optional[int] = do_resize _UpperCAmelCase : str = size _UpperCAmelCase : List[Any] = do_normalize _UpperCAmelCase : Any = image_mean _UpperCAmelCase : Tuple = image_std _UpperCAmelCase : Any = crop_size def _A ( self : List[Any] ): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class lowerCamelCase_ (snake_case__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase: Dict = VivitImageProcessor if is_vision_available() else None def _A ( self : int ): _UpperCAmelCase : Tuple = VivitImageProcessingTester(self ) @property def _A ( self : Optional[Any] ): return self.image_processor_tester.prepare_image_processor_dict() def _A ( self : Union[str, Any] ): _UpperCAmelCase : str = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A , "image_mean" ) ) self.assertTrue(hasattr(A , "image_std" ) ) self.assertTrue(hasattr(A , "do_normalize" ) ) self.assertTrue(hasattr(A , "do_resize" ) ) self.assertTrue(hasattr(A , "do_center_crop" ) ) self.assertTrue(hasattr(A , "size" ) ) def _A ( self : List[Any] ): _UpperCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 18} ) self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} ) _UpperCAmelCase : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"shortest_edge": 42} ) self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} ) def _A ( self : Tuple ): # Initialize image_processing _UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL videos _UpperCAmelCase : Any = prepare_video_inputs(self.image_processor_tester , equal_resolution=A ) for video in video_inputs: self.assertIsInstance(A , A ) self.assertIsInstance(video[0] , Image.Image ) # Test not batched input _UpperCAmelCase : str = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, 
self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched _UpperCAmelCase : List[Any] = image_processing(A , return_tensors="pt" ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def _A ( self : List[Any] ): # Initialize image_processing _UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase : List[Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=A , numpify=A ) for video in video_inputs: self.assertIsInstance(A , A ) self.assertIsInstance(video[0] , np.ndarray ) # Test not batched input _UpperCAmelCase : Tuple = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched _UpperCAmelCase : Optional[int] = image_processing(A , return_tensors="pt" ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def _A ( self : List[Any] ): # Initialize image_processing _UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase : Optional[int] = prepare_video_inputs(self.image_processor_tester , equal_resolution=A , torchify=A ) for video in video_inputs: self.assertIsInstance(A , A ) self.assertIsInstance(video[0] , torch.Tensor ) # Test not batched input _UpperCAmelCase : Optional[Any] = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched _UpperCAmelCase : List[Any] = image_processing(A , return_tensors="pt" ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , )
31
"""simple docstring""" import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class snake_case ( unittest.TestCase ): a_ : Any = JukeboxTokenizer a_ : Any = { """artist""": """Zac Brown Band""", """genres""": """Country""", """lyrics""": """I met a traveller from an antique land, Who said \"Two vast and trunkless legs of stone Stand in the desert. . . . Near them, on the sand, Half sunk a shattered visage lies, whose frown, And wrinkled lip, and sneer of cold command, Tell that its sculptor well those passions read Which yet survive, stamped on these lifeless things, The hand that mocked them, and the heart that fed; And on the pedestal, these words appear: My name is Ozymandias, King of Kings; Look on my Works, ye Mighty, and despair! Nothing beside remains. Round the decay Of that colossal Wreck, boundless and bare The lone and level sands stretch far away """, } @require_torch def UpperCAmelCase__ ( self) ->Any: import torch a_ = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics") a_ = tokenizer(**self.metas)["input_ids"] # fmt: off a_ = [ torch.tensor([[ 0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67, 78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 
76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]]), torch.tensor([[0, 0, 0, 10_69, 11]]), torch.tensor([[0, 0, 0, 10_69, 11]]), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0])) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1])) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2])) @require_torch def UpperCAmelCase__ ( self) ->Tuple: import torch a_ = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics") a_ = tokenizer(**self.metas)["input_ids"] # fmt: off a_ = [ torch.tensor([[ 0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 
18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]]), torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]]), torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]]), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0])) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1])) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2]))
243
0
import numpy as np class Cell: def __init__( self ): self.position = (0, 0) self.parent = None self.g = 0 self.h = 0 self.f = 0 def __eq__( self , cell ): return self.position == cell.position def showcell( self ): print(self.position ) class Gridworld: def __init__( self , world_size=(5, 5) ): self.w = np.zeros(world_size ) self.world_x_limit = world_size[0] self.world_y_limit = world_size[1] def show( self ): print(self.w ) def get_neighbours( self , cell ): neighbour_cord = [ (-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1), ] current_x = cell.position[0] current_y = cell.position[1] neighbours = [] for n in neighbour_cord: x = current_x + n[0] y = current_y + n[1] if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit: c = Cell() c.position = (x, y) c.parent = cell neighbours.append(c ) return neighbours def astar( world , start , goal ): _open = [] _closed = [] _open.append(start ) while _open: min_f = np.argmin([n.f for n in _open] ) current = _open[min_f] _closed.append(_open.pop(min_f ) ) if current == goal: break for n in world.get_neighbours(current ): for c in _closed: if c == n: continue n.g = current.g + 1 xa, ya = n.position xb, yb = goal.position n.h = (yb - ya) ** 2 + (xb - xa) ** 2 n.f = n.h + n.g for c in _open: if c == n and c.f < n.f: continue _open.append(n ) path = [] while current.parent is not None: path.append(current.position ) current = current.parent path.append(current.position ) return path[::-1] if __name__ == "__main__": world = Gridworld() # Start position and goal start = Cell() start.position = (0, 0) goal = Cell() goal.position = (4, 4) print(f'''path from {start.position} to {goal.position}''') s = astar(world, start, goal) # Just for visual reasons. for i in s: world.w[i] = 1 print(world.w)
356
import argparse import torch from transformers import BertForMaskedLM if __name__ == "__main__": lowerCAmelCase__ :Tuple = argparse.ArgumentParser( description=( '''Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned''' ''' Distillation''' ) ) parser.add_argument('''--model_type''', default='''bert''', choices=['''bert''']) parser.add_argument('''--model_name''', default='''bert-base-uncased''', type=str) parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_bert-base-uncased_0247911.pth''', type=str) parser.add_argument('''--vocab_transform''', action='''store_true''') lowerCAmelCase__ :Optional[int] = parser.parse_args() if args.model_type == "bert": lowerCAmelCase__ :Tuple = BertForMaskedLM.from_pretrained(args.model_name) lowerCAmelCase__ :Optional[int] = '''bert''' else: raise ValueError('''args.model_type should be "bert".''') lowerCAmelCase__ :Any = model.state_dict() lowerCAmelCase__ :Dict = {} for w in ["word_embeddings", "position_embeddings"]: lowerCAmelCase__ :List[Any] = state_dict[f'''{prefix}.embeddings.{w}.weight'''] for w in ["weight", "bias"]: lowerCAmelCase__ :Union[str, Any] = state_dict[f'''{prefix}.embeddings.LayerNorm.{w}'''] lowerCAmelCase__ :str = 0 for teacher_idx in [0, 2, 4, 7, 9, 1_1]: for w in ["weight", "bias"]: lowerCAmelCase__ :Any = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}''' ] lowerCAmelCase__ :List[Any] = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}''' ] lowerCAmelCase__ :List[str] = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}''' ] lowerCAmelCase__ :List[Any] = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}''' ] lowerCAmelCase__ :int = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}''' ] lowerCAmelCase__ :List[Any] = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}''' ] lowerCAmelCase__ :List[Any] = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}''' ] lowerCAmelCase__ :List[Any] = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}''' ] std_idx += 1 lowerCAmelCase__ :Optional[int] = state_dict['''cls.predictions.decoder.weight'''] lowerCAmelCase__ :List[str] = state_dict['''cls.predictions.bias'''] if args.vocab_transform: for w in ["weight", "bias"]: lowerCAmelCase__ :Any = state_dict[f'''cls.predictions.transform.dense.{w}'''] lowerCAmelCase__ :List[str] = state_dict[f'''cls.predictions.transform.LayerNorm.{w}'''] print(f'''N layers selected for distillation: {std_idx}''') print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''') print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''') torch.save(compressed_sd, args.dump_checkpoint)
185
0
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class ClapProcessor(ProcessorMixin ): feature_extractor_class = 'ClapFeatureExtractor' tokenizer_class = ('RobertaTokenizer', 'RobertaTokenizerFast') def __init__( self , feature_extractor , tokenizer ): super().__init__(feature_extractor , tokenizer ) def __call__( self , text=None , audios=None , return_tensors=None , **kwargs ): sampling_rate = kwargs.pop('''sampling_rate''' , None ) if text is None and audios is None: raise ValueError('''You have to specify either text or audios. Both cannot be none.''' ) if text is not None: encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs ) if audios is not None: audio_features = self.feature_extractor( audios , sampling_rate=sampling_rate , return_tensors=return_tensors , **kwargs ) if text is not None and audios is not None: encoding['''input_features'''] = audio_features.input_features return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**audio_features ) , tensor_type=return_tensors ) def batch_decode( self , *args , **kwargs ): return self.tokenizer.batch_decode(*args , **kwargs ) def decode( self , *args , **kwargs ): return self.tokenizer.decode(*args , **kwargs ) @property def model_input_names( self ): tokenizer_input_names = self.tokenizer.model_input_names feature_extractor_input_names = self.feature_extractor.model_input_names return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
94
__all__ = [ "Audio", "Array2D", "Array3D", "Array4D", "Array5D", "ClassLabel", "Features", "Sequence", "Value", "Image", "Translation", "TranslationVariableLanguages", ] from .audio import Audio from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value from .image import Image from .translation import Translation, TranslationVariableLanguages
111
0
"""simple docstring""" from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class UpperCamelCase_ (__A ): __magic_name__ = ['''vqvae'''] def __init__( self : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , ) -> Tuple: super().__init__() self.register_modules(unet=_snake_case , scheduler=_snake_case , mel=_snake_case , vqvae=_snake_case ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int: return 50 if isinstance(self.scheduler , _snake_case ) else 1_000 @torch.no_grad() def __call__( self : Optional[int] , lowerCAmelCase_ : Dict = 1 , lowerCAmelCase_ : str = None , lowerCAmelCase_ : Any = None , lowerCAmelCase_ : List[str] = 0 , lowerCAmelCase_ : Dict = 0 , lowerCAmelCase_ : List[str] = None , lowerCAmelCase_ : Optional[Any] = None , lowerCAmelCase_ : Tuple = 0 , lowerCAmelCase_ : Union[str, Any] = 0 , lowerCAmelCase_ : List[str] = None , lowerCAmelCase_ : Dict = 0 , lowerCAmelCase_ : Dict = None , lowerCAmelCase_ : Union[str, Any] = None , lowerCAmelCase_ : str=True , ) -> Union[ Union[AudioPipelineOutput, ImagePipelineOutput], Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]], ]: UpperCAmelCase_ : Tuple = steps or self.get_default_steps() self.scheduler.set_timesteps(_snake_case ) UpperCAmelCase_ : Union[str, Any] = step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: UpperCAmelCase_ : Optional[Any] = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: UpperCAmelCase_ : Optional[Any] = randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) , generator=_snake_case , device=self.device , ) UpperCAmelCase_ : List[Any] = noise UpperCAmelCase_ : Dict = None if audio_file is not None or raw_audio is not None: self.mel.load_audio(_snake_case , _snake_case ) UpperCAmelCase_ : Any = self.mel.audio_slice_to_image(_snake_case ) UpperCAmelCase_ : Optional[int] = np.frombuffer(input_image.tobytes() , dtype="uint8" ).reshape( (input_image.height, input_image.width) ) UpperCAmelCase_ : Optional[int] = (input_image / 255) * 2 - 1 UpperCAmelCase_ : Optional[Any] = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device ) if self.vqvae is not None: UpperCAmelCase_ : int = self.vqvae.encode(torch.unsqueeze(_snake_case , 0 ) ).latent_dist.sample( generator=_snake_case )[0] UpperCAmelCase_ : List[str] = self.vqvae.config.scaling_factor * input_images if start_step > 0: UpperCAmelCase_ : Optional[int] = self.scheduler.add_noise(_snake_case , _snake_case , self.scheduler.timesteps[start_step - 1] ) UpperCAmelCase_ : str = ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) UpperCAmelCase_ : Any = int(mask_start_secs * pixels_per_second ) UpperCAmelCase_ : int = int(mask_end_secs * pixels_per_second ) UpperCAmelCase_ : int = self.scheduler.add_noise(_snake_case , _snake_case , torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet , _snake_case ): 
UpperCAmelCase_ : List[str] = self.unet(_snake_case , _snake_case , _snake_case )["sample"] else: UpperCAmelCase_ : Optional[int] = self.unet(_snake_case , _snake_case )["sample"] if isinstance(self.scheduler , _snake_case ): UpperCAmelCase_ : Tuple = self.scheduler.step( model_output=_snake_case , timestep=_snake_case , sample=_snake_case , eta=_snake_case , generator=_snake_case , )["prev_sample"] else: UpperCAmelCase_ : List[str] = self.scheduler.step( model_output=_snake_case , timestep=_snake_case , sample=_snake_case , generator=_snake_case , )["prev_sample"] if mask is not None: if mask_start > 0: UpperCAmelCase_ : Any = mask[:, step, :, :mask_start] if mask_end > 0: UpperCAmelCase_ : Tuple = mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance UpperCAmelCase_ : Tuple = 1 / self.vqvae.config.scaling_factor * images UpperCAmelCase_ : int = self.vqvae.decode(_snake_case )["sample"] UpperCAmelCase_ : Any = (images / 2 + 0.5).clamp(0 , 1 ) UpperCAmelCase_ : List[Any] = images.cpu().permute(0 , 2 , 3 , 1 ).numpy() UpperCAmelCase_ : List[Any] = (images * 255).round().astype("uint8" ) UpperCAmelCase_ : str = list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(_snake_case , mode="RGB" ).convert("L" ) for _ in images) ) UpperCAmelCase_ : Tuple = [self.mel.image_to_audio(_snake_case ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(_snake_case )[:, np.newaxis, :] ) , **ImagePipelineOutput(_snake_case ) ) @torch.no_grad() def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict = 50 ) -> np.ndarray: assert isinstance(self.scheduler , _snake_case ) self.scheduler.set_timesteps(_snake_case ) UpperCAmelCase_ : List[Any] = np.array( [np.frombuffer(image.tobytes() , dtype="uint8" ).reshape((1, image.height, image.width) ) for image in images] ) UpperCAmelCase_ : List[Any] = (sample / 255) * 2 - 1 UpperCAmelCase_ : Tuple = torch.Tensor(_snake_case ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ): UpperCAmelCase_ : List[Any] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps UpperCAmelCase_ : int = self.scheduler.alphas_cumprod[t] UpperCAmelCase_ : Union[str, Any] = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) UpperCAmelCase_ : Optional[int] = 1 - alpha_prod_t UpperCAmelCase_ : Any = self.unet(_snake_case , _snake_case )["sample"] UpperCAmelCase_ : Tuple = (1 - alpha_prod_t_prev) ** 0.5 * model_output UpperCAmelCase_ : List[Any] = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) UpperCAmelCase_ : str = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def _SCREAMING_SNAKE_CASE ( lowerCAmelCase_ : Any , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple ) -> torch.Tensor: UpperCAmelCase_ : List[Any] = acos(torch.dot(torch.flatten(_snake_case ) , torch.flatten(_snake_case ) ) / torch.norm(_snake_case ) / torch.norm(_snake_case ) ) return sin((1 - alpha) * theta ) * xa / sin(_snake_case ) + sin(alpha * theta ) * xa / sin(_snake_case )
361
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { '''microsoft/table-transformer-detection''': ( '''https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json''' ), } class UpperCamelCase_ (__A ): __magic_name__ = '''table-transformer''' __magic_name__ = ['''past_key_values'''] __magic_name__ = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', } def __init__( self : List[Any] , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Optional[Any]=3 , lowerCAmelCase_ : Optional[Any]=100 , lowerCAmelCase_ : Optional[int]=6 , lowerCAmelCase_ : List[Any]=2_048 , lowerCAmelCase_ : Tuple=8 , lowerCAmelCase_ : Dict=6 , lowerCAmelCase_ : List[Any]=2_048 , lowerCAmelCase_ : Optional[int]=8 , lowerCAmelCase_ : Tuple=0.0 , lowerCAmelCase_ : str=0.0 , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Optional[int]="relu" , lowerCAmelCase_ : List[Any]=256 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : Optional[int]=0.0 , lowerCAmelCase_ : Union[str, Any]=0.0 , lowerCAmelCase_ : Dict=0.0_2 , lowerCAmelCase_ : Any=1.0 , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : Dict="sine" , lowerCAmelCase_ : Optional[Any]="resnet50" , lowerCAmelCase_ : int=True , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : int=1 , lowerCAmelCase_ : int=5 , lowerCAmelCase_ : Union[str, Any]=2 , lowerCAmelCase_ : Any=1 , lowerCAmelCase_ : List[str]=1 , lowerCAmelCase_ : List[Any]=5 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Optional[Any]=0.1 , **lowerCAmelCase_ : Dict , ) -> Union[str, Any]: if backbone_config is not None and use_timm_backbone: raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." ) if not use_timm_backbone: if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." 
) UpperCAmelCase_ : Union[str, Any] = CONFIG_MAPPING["resnet"](out_features=["stage4"] ) elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): UpperCAmelCase_ : Dict = backbone_config.get("model_type" ) UpperCAmelCase_ : str = CONFIG_MAPPING[backbone_model_type] UpperCAmelCase_ : Any = config_class.from_dict(lowerCAmelCase_ ) # set timm attributes to None UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = None, None, None UpperCAmelCase_ : int = use_timm_backbone UpperCAmelCase_ : int = backbone_config UpperCAmelCase_ : Dict = num_channels UpperCAmelCase_ : Optional[Any] = num_queries UpperCAmelCase_ : List[str] = d_model UpperCAmelCase_ : Union[str, Any] = encoder_ffn_dim UpperCAmelCase_ : Optional[Any] = encoder_layers UpperCAmelCase_ : List[str] = encoder_attention_heads UpperCAmelCase_ : int = decoder_ffn_dim UpperCAmelCase_ : int = decoder_layers UpperCAmelCase_ : Optional[int] = decoder_attention_heads UpperCAmelCase_ : List[str] = dropout UpperCAmelCase_ : Dict = attention_dropout UpperCAmelCase_ : Union[str, Any] = activation_dropout UpperCAmelCase_ : Optional[int] = activation_function UpperCAmelCase_ : int = init_std UpperCAmelCase_ : Any = init_xavier_std UpperCAmelCase_ : Union[str, Any] = encoder_layerdrop UpperCAmelCase_ : Dict = decoder_layerdrop UpperCAmelCase_ : Union[str, Any] = encoder_layers UpperCAmelCase_ : Any = auxiliary_loss UpperCAmelCase_ : List[str] = position_embedding_type UpperCAmelCase_ : Dict = backbone UpperCAmelCase_ : Optional[int] = use_pretrained_backbone UpperCAmelCase_ : Tuple = dilation # Hungarian matcher UpperCAmelCase_ : Optional[Any] = class_cost UpperCAmelCase_ : List[Any] = bbox_cost UpperCAmelCase_ : Optional[int] = giou_cost # Loss coefficients UpperCAmelCase_ : Optional[int] = mask_loss_coefficient UpperCAmelCase_ : List[str] = dice_loss_coefficient UpperCAmelCase_ : Union[str, Any] = bbox_loss_coefficient UpperCAmelCase_ : Union[str, Any] = giou_loss_coefficient UpperCAmelCase_ : Dict = eos_coefficient super().__init__(is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ ) @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: return self.encoder_attention_heads @property def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: return self.d_model class UpperCamelCase_ (__A ): __magic_name__ = version.parse('''1.11''' ) @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("pixel_mask", {0: "batch"}), ] ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> float: return 1e-5 @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int: return 12
253
0
import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging __UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) def _a ( SCREAMING_SNAKE_CASE : str=None , SCREAMING_SNAKE_CASE : int=None ): """simple docstring""" return field(default_factory=lambda: default , metadata=SCREAMING_SNAKE_CASE ) @dataclass class __magic_name__ : A: List[str] = list_field( default=[] , metadata={ "help": ( "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version" " of all available models" ) } , ) A: List[int] = list_field( default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}) A: List[int] = list_field( default=[8, 3_2, 1_2_8, 5_1_2] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , ) A: bool = field( default=__lowerCAmelCase , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , ) A: bool = field( default=__lowerCAmelCase , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , ) A: bool = field( default=__lowerCAmelCase , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}) A: bool = field(default=__lowerCAmelCase , metadata={"help": "Use FP16 to accelerate inference."}) A: bool = field(default=__lowerCAmelCase , metadata={"help": "Benchmark training of model"}) A: bool = field(default=__lowerCAmelCase , metadata={"help": "Verbose memory tracing"}) A: bool = field( default=__lowerCAmelCase , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , ) A: bool = field( default=__lowerCAmelCase , metadata={ "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory" } , ) A: bool = field(default=__lowerCAmelCase , metadata={"help": "Trace memory line by line"}) A: bool = field(default=__lowerCAmelCase , metadata={"help": "Save result to a CSV file"}) A: bool = field(default=__lowerCAmelCase , metadata={"help": "Save all print statements in a log file"}) A: bool = field(default=__lowerCAmelCase , metadata={"help": "Whether to print environment information"}) A: bool = field( default=__lowerCAmelCase , metadata={ "help": ( "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use" " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled" " for debugging / testing and on TPU." 
) } , ) A: str = field( default=F"inference_time_{round(time())}.csv" , metadata={"help": "CSV filename used if saving time results to csv."} , ) A: str = field( default=F"inference_memory_{round(time())}.csv" , metadata={"help": "CSV filename used if saving memory results to csv."} , ) A: str = field( default=F"train_time_{round(time())}.csv" , metadata={"help": "CSV filename used if saving time results to csv for training."} , ) A: str = field( default=F"train_memory_{round(time())}.csv" , metadata={"help": "CSV filename used if saving memory results to csv for training."} , ) A: str = field( default=F"env_info_{round(time())}.csv" , metadata={"help": "CSV filename used if saving environment information."} , ) A: str = field( default=F"log_{round(time())}.csv" , metadata={"help": "Log filename used if print statements are saved in log."} , ) A: int = field(default=3 , metadata={"help": "Times an experiment will be run."}) A: bool = field( default=__lowerCAmelCase , metadata={ "help": ( "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain" " model weights." ) } , ) def UpperCAmelCase__ ( self : Optional[int] ) -> List[str]: '''simple docstring''' warnings.warn( F"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils" ''' are deprecated in general and it is advised to use external Benchmarking libraries ''' ''' to benchmark Transformer models.''' , lowerCamelCase__ , ) def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple: '''simple docstring''' return json.dumps(dataclasses.asdict(self ) , indent=2 ) @property def UpperCAmelCase__ ( self : Any ) -> List[str]: '''simple docstring''' if len(self.models ) <= 0: raise ValueError( '''Please make sure you provide at least one model name / model identifier, *e.g.* `--models''' ''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''' ) return self.models @property def UpperCAmelCase__ ( self : Optional[int] ) -> Any: '''simple docstring''' if not self.multi_process: return False elif self.is_tpu: logger.info('''Multiprocessing is currently not possible on TPU.''' ) return False else: return True
146
import argparse import json import numpy import torch from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def convert_xlm_checkpoint_to_pytorch( xlm_checkpoint_path , pytorch_dump_folder_path ): chkpt = torch.load(xlm_checkpoint_path , map_location='''cpu''' ) state_dict = chkpt['''model'''] # We have the base model one level deeper than the original XLM repository two_levels_state_dict = {} for k, v in state_dict.items(): if "pred_layer" in k: two_levels_state_dict[k] = v else: two_levels_state_dict['''transformer.''' + k] = v config = chkpt['''params'''] config = {n: v for n, v in config.items() if not isinstance(v , (torch.FloatTensor, numpy.ndarray) )} vocab = chkpt['''dico_word2id'''] vocab = {s + '''</w>''' if s.find('''@@''' ) == -1 and i > 13 else s.replace('''@@''' , '''''' ): i for s, i in vocab.items()} # Save pytorch-model pytorch_weights_dump_path = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME pytorch_config_dump_path = pytorch_dump_folder_path + '''/''' + CONFIG_NAME pytorch_vocab_dump_path = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''vocab_file'''] print(f"Save PyTorch model to {pytorch_weights_dump_path}" ) torch.save(two_levels_state_dict , pytorch_weights_dump_path ) print(f"Save configuration file to {pytorch_config_dump_path}" ) with open(pytorch_config_dump_path , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(config , indent=2 ) + '''\n''' ) print(f"Save vocab file to {pytorch_vocab_dump_path}" ) with open(pytorch_vocab_dump_path , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(vocab , indent=2 ) + '''\n''' ) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) args = parser.parse_args() convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
146
1
from ...utils import ( OptionalDependencyNotAvailable, is_note_seq_available, is_torch_available, is_transformers_available, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .notes_encoder import SpectrogramNotesEncoder from .continous_encoder import SpectrogramContEncoder from .pipeline_spectrogram_diffusion import ( SpectrogramContEncoder, SpectrogramDiffusionPipeline, T5FilmDecoder, ) try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .midi_utils import MidiProcessor
301
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging logger = logging.get_logger(__name__) MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = { 'google/mobilenet_v1_1.0_224': 'https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json', 'google/mobilenet_v1_0.75_192': 'https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 } class MobileNetV1Config(PretrainedConfig ): model_type = '''mobilenet_v1''' def __init__( self , num_channels=3 , image_size=224 , depth_multiplier=1.0 , min_depth=8 , hidden_act="relu6" , tf_padding=True , classifier_dropout_prob=0.999 , initializer_range=0.02 , layer_norm_eps=0.001 , **kwargs ): super().__init__(**kwargs ) if depth_multiplier <= 0: raise ValueError('''depth_multiplier must be greater than zero.''' ) self.num_channels = num_channels self.image_size = image_size self.depth_multiplier = depth_multiplier self.min_depth = min_depth self.hidden_act = hidden_act self.tf_padding = tf_padding self.classifier_dropout_prob = classifier_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps class MobileNetV1OnnxConfig(OnnxConfig ): torch_onnx_minimum_version = version.parse('''1.11''' ) @property def inputs( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict([('''pixel_values''', {0: '''batch'''})] ) @property def outputs( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "image-classification": return OrderedDict([('''logits''', {0: '''batch'''})] ) else: return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] ) @property def atol_for_validation( self ) -> float: return 1e-4
301
1
def solution( power = 1000 ) -> int: num = 2**power string_num = str(num ) list_num = list(string_num ) sum_of_num = 0 for i in list_num: sum_of_num += int(i ) return sum_of_num if __name__ == "__main__": power = int(input('''Enter the power of 2: ''').strip()) print('''2 ^ ''', power, ''' = ''', 2**power) result = solution(power) print('''Sum of the digits is: ''', result)
143
"""simple docstring""" from __future__ import annotations from typing import Generic, TypeVar _lowerCamelCase : Any = TypeVar('T') class lowercase ( Generic[T]): def __init__( self : Tuple , _lowerCamelCase : T ): """simple docstring""" A_ : Union[str, Any] = data A_ : List[Any] = self A_ : Optional[Any] = 0 class lowercase ( Generic[T]): def __init__( self : int ): """simple docstring""" A_ : dict[T, DisjointSetTreeNode[T]] = {} def a_ ( self : List[str] , _lowerCamelCase : T ): """simple docstring""" A_ : List[str] = DisjointSetTreeNode(_lowerCamelCase ) def a_ ( self : Dict , _lowerCamelCase : T ): """simple docstring""" A_ : Any = self.map[data] if elem_ref != elem_ref.parent: A_ : Any = self.find_set(elem_ref.parent.data ) return elem_ref.parent def a_ ( self : Union[str, Any] , _lowerCamelCase : DisjointSetTreeNode[T] , _lowerCamelCase : DisjointSetTreeNode[T] ): """simple docstring""" if nodea.rank > nodea.rank: A_ : List[str] = nodea else: A_ : Optional[Any] = nodea if nodea.rank == nodea.rank: nodea.rank += 1 def a_ ( self : Optional[Any] , _lowerCamelCase : T , _lowerCamelCase : T ): """simple docstring""" self.link(self.find_set(_lowerCamelCase ) , self.find_set(_lowerCamelCase ) ) class lowercase ( Generic[T]): def __init__( self : Tuple ): """simple docstring""" A_ : dict[T, dict[T, int]] = {} def a_ ( self : List[Any] , _lowerCamelCase : T ): """simple docstring""" if node not in self.connections: A_ : Tuple = {} def a_ ( self : Optional[Any] , _lowerCamelCase : T , _lowerCamelCase : T , _lowerCamelCase : int ): """simple docstring""" self.add_node(_lowerCamelCase ) self.add_node(_lowerCamelCase ) A_ : int = weight A_ : Dict = weight def a_ ( self : Any ): """simple docstring""" A_ : Tuple = [] A_ : Tuple = set() for start in self.connections: for end in self.connections[start]: if (start, end) not in seen: seen.add((end, start) ) edges.append((start, end, self.connections[start][end]) ) edges.sort(key=lambda _lowerCamelCase : x[2] ) # creating the disjoint set A_ : Optional[Any] = DisjointSetTree[T]() for node in self.connections: disjoint_set.make_set(_lowerCamelCase ) # MST generation A_ : Any = 0 A_ : Optional[int] = 0 A_ : Union[str, Any] = GraphUndirectedWeighted[T]() while num_edges < len(self.connections ) - 1: A_ , A_ , A_ : int = edges[index] index += 1 A_ : Tuple = disjoint_set.find_set(_lowerCamelCase ) A_ : int = disjoint_set.find_set(_lowerCamelCase ) if parent_u != parent_v: num_edges += 1 graph.add_edge(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) disjoint_set.union(_lowerCamelCase , _lowerCamelCase ) return graph
167
0
from math import ceil def assert_device_map( device_map , num_blocks ): blocks = list(range(0, num_blocks ) ) device_map_blocks = [item for sublist in list(device_map.values() ) for item in sublist] # Duplicate check duplicate_blocks = [] for i in device_map_blocks: if device_map_blocks.count(i ) > 1 and i not in duplicate_blocks: duplicate_blocks.append(i ) # Missing blocks missing_blocks = [i for i in blocks if i not in device_map_blocks] extra_blocks = [i for i in device_map_blocks if i not in blocks] if len(duplicate_blocks ) != 0: raise ValueError( '''Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.''' ''' These attention blocks were specified more than once: ''' + str(duplicate_blocks ) ) if len(missing_blocks ) != 0: raise ValueError( '''There are attention blocks for this model that are not specified in the device_map. Add these attention ''' '''blocks to a device on the device_map: ''' + str(missing_blocks ) ) if len(extra_blocks ) != 0: raise ValueError( '''The device_map contains more attention blocks than this model has. Remove these from the device_map:''' + str(extra_blocks ) ) def get_device_map( n_layers , devices ): layers = list(range(n_layers ) ) n_blocks = int(ceil(n_layers / len(devices ) ) ) layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks )] return dict(zip(devices, layers_list ) )
357
'''simple docstring''' import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class a ( _lowerCamelCase ): snake_case_ = 42 snake_case_ = None def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase=0.9_9_9, __UpperCAmelCase="cosine", ) -> Dict: '''simple docstring''' if alpha_transform_type == "cosine": def alpha_bar_fn(__UpperCAmelCase ): return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(__UpperCAmelCase ): return math.exp(t * -1_2.0 ) else: raise ValueError(F"Unsupported alpha_tranform_type: {alpha_transform_type}" ) snake_case_ = [] for i in range(__UpperCAmelCase ): snake_case_ = i / num_diffusion_timesteps snake_case_ = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(__UpperCAmelCase ) / alpha_bar_fn(__UpperCAmelCase ), __UpperCAmelCase ) ) return torch.tensor(__UpperCAmelCase, dtype=torch.floataa ) class a ( _lowerCamelCase , _lowerCamelCase ): @register_to_config def __init__( self : List[str] , lowercase_ : int = 1000 , lowercase_ : str = "fixed_small_log" , lowercase_ : bool = True , lowercase_ : Optional[float] = 1.0 , lowercase_ : str = "epsilon" , lowercase_ : str = "squaredcos_cap_v2" , ): if beta_schedule != "squaredcos_cap_v2": raise ValueError('''UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'''' ) snake_case_ = betas_for_alpha_bar(lowercase_ ) snake_case_ = 1.0 - self.betas snake_case_ = torch.cumprod(self.alphas , dim=0 ) snake_case_ = torch.tensor(1.0 ) # standard deviation of the initial noise distribution snake_case_ = 1.0 # setable values snake_case_ = None snake_case_ = torch.from_numpy(np.arange(0 , lowercase_ )[::-1].copy() ) snake_case_ = variance_type def A_ ( self : Optional[Any] , lowercase_ : torch.FloatTensor , lowercase_ : Optional[int] = None ): return sample def A_ ( self : Optional[int] , lowercase_ : int , lowercase_ : Union[str, torch.device] = None ): snake_case_ = num_inference_steps snake_case_ = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) snake_case_ = (np.arange(0 , lowercase_ ) * step_ratio).round()[::-1].copy().astype(np.intaa ) snake_case_ = torch.from_numpy(lowercase_ ).to(lowercase_ ) def A_ ( self : Optional[int] , lowercase_ : List[Any] , lowercase_ : Optional[int]=None , lowercase_ : Tuple=None , lowercase_ : Tuple=None ): if prev_timestep is None: snake_case_ = t - 1 snake_case_ = self.alphas_cumprod[t] snake_case_ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one snake_case_ = 1 - alpha_prod_t snake_case_ = 1 - alpha_prod_t_prev if prev_timestep == t - 1: snake_case_ = self.betas[t] else: snake_case_ = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample snake_case_ = beta_prod_t_prev / beta_prod_t * beta if variance_type is None: snake_case_ = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": snake_case_ = torch.log(torch.clamp(lowercase_ , min=1e-20 ) ) snake_case_ = torch.exp(0.5 * variance ) 
elif variance_type == "learned_range": # NOTE difference with DDPM scheduler snake_case_ = variance.log() snake_case_ = beta.log() snake_case_ = (predicted_variance + 1) / 2 snake_case_ = frac * max_log + (1 - frac) * min_log return variance def A_ ( self : List[Any] , lowercase_ : torch.FloatTensor , lowercase_ : int , lowercase_ : torch.FloatTensor , lowercase_ : Optional[int] = None , lowercase_ : int=None , lowercase_ : bool = True , ): snake_case_ = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": snake_case_ ,snake_case_ = torch.split(lowercase_ , sample.shape[1] , dim=1 ) else: snake_case_ = None # 1. compute alphas, betas if prev_timestep is None: snake_case_ = t - 1 snake_case_ = self.alphas_cumprod[t] snake_case_ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one snake_case_ = 1 - alpha_prod_t snake_case_ = 1 - alpha_prod_t_prev if prev_timestep == t - 1: snake_case_ = self.betas[t] snake_case_ = self.alphas[t] else: snake_case_ = 1 - alpha_prod_t / alpha_prod_t_prev snake_case_ = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": snake_case_ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": snake_case_ = model_output else: raise ValueError( F"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`" ''' for the UnCLIPScheduler.''' ) # 3. Clip "predicted x_0" if self.config.clip_sample: snake_case_ = torch.clamp( lowercase_ , -self.config.clip_sample_range , self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf snake_case_ = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t snake_case_ = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf snake_case_ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise snake_case_ = 0 if t > 0: snake_case_ = randn_tensor( model_output.shape , dtype=model_output.dtype , generator=lowercase_ , device=model_output.device ) snake_case_ = self._get_variance( lowercase_ , predicted_variance=lowercase_ , prev_timestep=lowercase_ , ) if self.variance_type == "fixed_small_log": snake_case_ = variance elif self.variance_type == "learned_range": snake_case_ = (0.5 * variance).exp() else: raise ValueError( F"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`" ''' for the UnCLIPScheduler.''' ) snake_case_ = variance * variance_noise snake_case_ = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=lowercase_ , pred_original_sample=lowercase_ ) def A_ ( self : Any , lowercase_ : torch.FloatTensor , lowercase_ : torch.FloatTensor , lowercase_ : torch.IntTensor , ): # Make sure alphas_cumprod and timestep have same device and dtype as original_samples snake_case_ = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype ) snake_case_ = timesteps.to(original_samples.device ) snake_case_ = alphas_cumprod[timesteps] ** 0.5 snake_case_ = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): snake_case_ = sqrt_alpha_prod.unsqueeze(-1 ) snake_case_ = (1 - alphas_cumprod[timesteps]) ** 0.5 snake_case_ = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): snake_case_ = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) snake_case_ = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
72
0
"""simple docstring""" import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration __UpperCAmelCase = [ # tf -> hf ("""/""", """."""), ("""layer_""", """layers."""), ("""kernel""", """weight"""), ("""beta""", """bias"""), ("""gamma""", """weight"""), ("""pegasus""", """model"""), ] __UpperCAmelCase = [ (""".output.dense""", """.fc2"""), ("""intermediate.LayerNorm""", """final_layer_norm"""), ("""intermediate.dense""", """fc1"""), ] __UpperCAmelCase = ( INIT_COMMON + [ ("""attention.self.LayerNorm""", """self_attn_layer_norm"""), ("""attention.output.dense""", """self_attn.out_proj"""), ("""attention.self""", """self_attn"""), ("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""), ("""attention.encdec_output.dense""", """encoder_attn.out_proj"""), ("""attention.encdec""", """encoder_attn"""), ("""key""", """k_proj"""), ("""value""", """v_proj"""), ("""query""", """q_proj"""), ("""decoder.LayerNorm""", """decoder.layernorm_embedding"""), ] + END_COMMON ) __UpperCAmelCase = ( INIT_COMMON + [ ("""embeddings.word_embeddings""", """shared.weight"""), ("""embeddings.position_embeddings""", """embed_positions.weight"""), ("""attention.self.LayerNorm""", """self_attn_layer_norm"""), ("""attention.output.dense""", """self_attn.output"""), ("""attention.self""", """self_attn.self"""), ("""encoder.LayerNorm""", """encoder.layernorm_embedding"""), ] + END_COMMON ) __UpperCAmelCase = [ """encdec/key/bias""", """encdec/query/bias""", """encdec/value/bias""", """self/key/bias""", """self/query/bias""", """self/value/bias""", """encdec_output/dense/bias""", """attention/output/dense/bias""", ] def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : Any ) -> List[str]: '''simple docstring''' for tf_name, hf_name in patterns: lowerCAmelCase_ :Optional[int] = k.replace(UpperCAmelCase_ , UpperCAmelCase_ ) return k def _snake_case ( lowercase__ : dict , lowercase__ : dict ) -> BigBirdPegasusForConditionalGeneration: '''simple docstring''' lowerCAmelCase_ :int = BigBirdPegasusConfig(**UpperCAmelCase_ ) lowerCAmelCase_ :Any = BigBirdPegasusForConditionalGeneration(UpperCAmelCase_ ) lowerCAmelCase_ :Union[str, Any] = torch_model.state_dict() lowerCAmelCase_ :Tuple = {} # separating decoder weights lowerCAmelCase_ :Dict = {k: tf_weights[k] for k in tf_weights if k.startswith("""pegasus/decoder""" )} lowerCAmelCase_ :str = {k: tf_weights[k] for k in tf_weights if not k.startswith("""pegasus/decoder""" )} for k, v in tqdm(decoder_weights.items() , """tf -> hf conversion""" ): lowerCAmelCase_ :Tuple = [k.endswith(UpperCAmelCase_ ) for ending in KEYS_TO_IGNORE] if any(UpperCAmelCase_ ): continue lowerCAmelCase_ :Tuple = DECODER_PATTERNS lowerCAmelCase_ :Optional[int] = rename_state_dict_key(UpperCAmelCase_ , UpperCAmelCase_ ) if new_k not in state_dict: raise ValueError(f"""could not find new key {new_k} in state dict. 
(converted from {k})""" ) if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ): lowerCAmelCase_ :Union[str, Any] = v.T lowerCAmelCase_ :str = torch.from_numpy(UpperCAmelCase_ ) assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" for k, v in tqdm(remaining_weights.items() , """tf -> hf conversion""" ): lowerCAmelCase_ :Optional[Any] = [k.endswith(UpperCAmelCase_ ) for ending in KEYS_TO_IGNORE] if any(UpperCAmelCase_ ): continue lowerCAmelCase_ :Dict = REMAINING_PATTERNS lowerCAmelCase_ :List[str] = rename_state_dict_key(UpperCAmelCase_ , UpperCAmelCase_ ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" ) if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ): lowerCAmelCase_ :List[str] = v.T lowerCAmelCase_ :List[str] = torch.from_numpy(UpperCAmelCase_ ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" lowerCAmelCase_ :Any = mapping['model.embed_positions.weight'] lowerCAmelCase_ :Union[str, Any] = mapping.pop("""model.embed_positions.weight""" ) lowerCAmelCase_ :List[str] = torch_model.load_state_dict(UpperCAmelCase_ , strict=UpperCAmelCase_ ) lowerCAmelCase_ :Any = [ k for k in missing if k not in [ 'final_logits_bias', 'model.encoder.embed_tokens.weight', 'model.decoder.embed_tokens.weight', 'lm_head.weight', ] ] assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}""" assert extra == [], f"""no matches found for the following tf keys {extra}""" return torch_model def _snake_case ( lowercase__ : Union[str, Any] ) -> Dict: '''simple docstring''' lowerCAmelCase_ :int = tf.train.list_variables(UpperCAmelCase_ ) lowerCAmelCase_ :List[str] = {} lowerCAmelCase_ :List[Any] = ['global_step'] for name, shape in tqdm(UpperCAmelCase_ , desc="""converting tf checkpoint to dict""" ): lowerCAmelCase_ :Any = any(pat in name for pat in ignore_name ) if skip_key: continue lowerCAmelCase_ :Dict = tf.train.load_variable(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCAmelCase_ :List[Any] = array return tf_weights def _snake_case ( lowercase__ : str , lowercase__ : str , lowercase__ : dict ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ :Optional[int] = get_tf_weights_as_numpy(UpperCAmelCase_ ) lowerCAmelCase_ :List[str] = convert_bigbird_pegasus(UpperCAmelCase_ , UpperCAmelCase_ ) torch_model.save_pretrained(UpperCAmelCase_ ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables') parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.') __UpperCAmelCase = parser.parse_args() __UpperCAmelCase = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
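The converter above is normally driven from the command line; since its `def` names are mangled in this listing, the sketch below mirrors the original entry point instead (paths are placeholders):

# hypothetical direct call, equivalent to:
#   python convert_ckpt.py --tf_ckpt_path /path/to/tf_ckpt --save_dir ./out
convert_bigbird_pegasus_ckpt_to_pytorch(
    "/path/to/tf_ckpt",  # --tf_ckpt_path, passed to tf.train.list_variables
    "./out",             # --save_dir for the converted PyTorch weights
    config_update={},    # optional BigBirdPegasusConfig overrides
)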
84
'''simple docstring''' import numpy as np from cva import destroyAllWindows, imread, imshow, waitKey class UpperCAmelCase_ : """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int: if dst_width < 0 or dst_height < 0: raise ValueError('Destination width/height should be > 0' ) __lowerCamelCase : Dict = img __lowerCamelCase : Any = img.shape[1] __lowerCamelCase : Optional[int] = img.shape[0] __lowerCamelCase : Dict = dst_width __lowerCamelCase : str = dst_height __lowerCamelCase : Dict = self.src_w / self.dst_w __lowerCamelCase : List[Any] = self.src_h / self.dst_h __lowerCamelCase : Optional[int] = ( np.ones((self.dst_h, self.dst_w, 3) , np.uinta ) * 2_55 ) def lowercase_ ( self ) -> List[Any]: for i in range(self.dst_h ): for j in range(self.dst_w ): __lowerCamelCase : Union[str, Any] = self.img[self.get_y(SCREAMING_SNAKE_CASE_ )][self.get_x(SCREAMING_SNAKE_CASE_ )] def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> int: return int(self.ratio_x * x ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> int: return int(self.ratio_y * y ) if __name__ == "__main__": A__ , A__ : Optional[Any] = 800, 600 A__ : List[str] = imread("""image_data/lena.jpg""", 1) A__ : List[Any] = NearestNeighbour(im, dst_w, dst_h) n.process() imshow( f'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output ) waitKey(0) destroyAllWindows()
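A clean, runnable sketch of the nearest-neighbour mapping the class above implements (the class itself has mangled assignment targets, so this restates the algorithm with working, illustrative names):

import numpy as np

def nearest_neighbour_resize(img: np.ndarray, dst_w: int, dst_h: int) -> np.ndarray:
    src_h, src_w = img.shape[:2]
    ratio_x, ratio_y = src_w / dst_w, src_h / dst_h
    out = np.empty((dst_h, dst_w, *img.shape[2:]), dtype=img.dtype)
    for i in range(dst_h):
        for j in range(dst_w):
            # each output pixel copies its nearest source pixel
            out[i, j] = img[int(ratio_y * i), int(ratio_x * j)]
    return out

# doubling a 2x2 image to 4x4 repeats each source pixel in a 2x2 block
tiny = np.arange(4).reshape(2, 2)
assert (nearest_neighbour_resize(tiny, 4, 4)[0] == [0, 0, 1, 1]).all()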
185
0
import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class _lowerCAmelCase ( _lowerCamelCase ): '''simple docstring''' a_ : Union[str, Any] ="""naver-clova-ix/donut-base-finetuned-docvqa""" a_ : Union[str, Any] =( """This is a tool that answers a question about an document (pdf). It takes an input named `document` which """ """should be the document containing the information, as well as a `question` that is the question about the """ """document. It returns a text that contains the answer to the question.""" ) a_ : Union[str, Any] ="""document_qa""" a_ : str =AutoProcessor a_ : Tuple =VisionEncoderDecoderModel a_ : Optional[Any] =["""image""", """text"""] a_ : Optional[int] =["""text"""] def __init__( self : Optional[int] , *UpperCamelCase : Optional[int] , **UpperCamelCase : Tuple ): '''simple docstring''' if not is_vision_available(): raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.' ) super().__init__(*lowercase_ , **lowercase_ ) def UpperCamelCase_ ( self : Any , UpperCamelCase : "Image" , UpperCamelCase : str ): '''simple docstring''' _snake_case : Tuple = '<s_docvqa><s_question>{user_input}</s_question><s_answer>' _snake_case : List[str] = task_prompt.replace('{user_input}' , lowercase_ ) _snake_case : List[Any] = self.pre_processor.tokenizer( lowercase_ , add_special_tokens=lowercase_ , return_tensors='pt' ).input_ids _snake_case : Any = self.pre_processor(lowercase_ , return_tensors='pt' ).pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def UpperCamelCase_ ( self : str , UpperCamelCase : Dict ): '''simple docstring''' return self.model.generate( inputs['pixel_values'].to(self.device ) , decoder_input_ids=inputs['decoder_input_ids'].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=lowercase_ , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=lowercase_ , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=lowercase_ , ).sequences def UpperCamelCase_ ( self : List[str] , UpperCamelCase : Tuple ): '''simple docstring''' _snake_case : Optional[Any] = self.pre_processor.batch_decode(lowercase_ )[0] _snake_case : Dict = sequence.replace(self.pre_processor.tokenizer.eos_token , '' ) _snake_case : List[str] = sequence.replace(self.pre_processor.tokenizer.pad_token , '' ) _snake_case : List[str] = re.sub(R'<.*?>' , '' , lowercase_ , count=1 ).strip() # remove first task start token _snake_case : List[Any] = self.pre_processor.tokenajson(lowercase_ ) return sequence["answer"]
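The tool above drives Donut's DocVQA head; the essential encode step is filling the task prompt before tokenizing. A restatement of just that step (the question string is illustrative):

# prompt template taken from the encode step above
task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
prompt = task_prompt.replace("{user_input}", "What is the invoice total?")
# the processor then tokenizes `prompt` into decoder_input_ids
# (add_special_tokens=False) and the page image into pixel_values
# before calling generate()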
356
import argparse from pathlib import Path from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: str , lowerCAmelCase: str , lowerCAmelCase: Path , lowerCAmelCase: str = None , lowerCAmelCase: str = None , lowerCAmelCase: str = None , )-> List[Any]: if config_name_or_path is None: _snake_case : int = 'facebook/rag-token-base' if model_type == 'rag_token' else 'facebook/rag-sequence-base' if generator_tokenizer_name_or_path is None: _snake_case : Optional[int] = generator_name_or_path if question_encoder_tokenizer_name_or_path is None: _snake_case : List[str] = question_encoder_name_or_path _snake_case : List[str] = RagTokenForGeneration if model_type == 'rag_token' else RagSequenceForGeneration # Save model. _snake_case : Any = RagConfig.from_pretrained(lowerCAmelCase ) _snake_case : Tuple = AutoConfig.from_pretrained(lowerCAmelCase ) _snake_case : Any = AutoConfig.from_pretrained(lowerCAmelCase ) _snake_case : int = gen_config _snake_case : Tuple = question_encoder_config _snake_case : int = model_class.from_pretrained_question_encoder_generator( lowerCAmelCase , lowerCAmelCase , config=lowerCAmelCase ) rag_model.save_pretrained(lowerCAmelCase ) # Sanity check. model_class.from_pretrained(lowerCAmelCase ) # Save tokenizers. _snake_case : int = AutoTokenizer.from_pretrained(lowerCAmelCase ) gen_tokenizer.save_pretrained(dest_dir / 'generator_tokenizer/' ) _snake_case : str = AutoTokenizer.from_pretrained(lowerCAmelCase ) question_encoder_tokenizer.save_pretrained(dest_dir / 'question_encoder_tokenizer/' ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument( """--model_type""", choices=["""rag_sequence""", """rag_token"""], required=True, type=str, help="""RAG model type: rag_sequence, rag_token""", ) parser.add_argument("""--dest""", type=str, required=True, help="""Path to the output checkpoint directory.""") parser.add_argument("""--generator_name_or_path""", type=str, required=True, help="""Generator model identifier""") parser.add_argument( """--question_encoder_name_or_path""", type=str, required=True, help="""Question encoder model identifier""" ) parser.add_argument( """--generator_tokenizer_name_or_path""", type=str, help="""Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``""", ) parser.add_argument( """--question_encoder_tokenizer_name_or_path""", type=str, help="""Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``""", ) parser.add_argument( """--config_name_or_path""", type=str, help=( """Identifier of the model config to use, if not provided, resolves to a base config for a given""" """ ``model_type``""" ), ) lowerCAmelCase_ = parser.parse_args() lowerCAmelCase_ = Path(args.dest) dest_dir.mkdir(exist_ok=True) consolidate( args.model_type, args.generator_name_or_path, args.question_encoder_name_or_path, dest_dir, args.config_name_or_path, args.generator_tokenizer_name_or_path, args.question_encoder_tokenizer_name_or_path, )
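A plausible direct use of the consolidation helper above, mirroring its argparse entry point (the checkpoint identifiers are real public models but are only illustrative here; note the `def` name is mangled in this listing, `consolidate` is the call-site name in `__main__`):

from pathlib import Path

consolidate(
    "rag_sequence",                                  # --model_type
    "facebook/bart-large",                           # --generator_name_or_path
    "facebook/dpr-question_encoder-single-nq-base",  # --question_encoder_name_or_path
    Path("./rag-consolidated"),                      # --dest
)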
260
0
'''simple docstring''' import math import os import sys def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ): __a : str = '' try: with open(_SCREAMING_SNAKE_CASE , 'rb' ) as binary_file: __a : str = binary_file.read() for dat in data: __a : str = F"""{dat:08b}""" result += curr_byte return result except OSError: print('File not accessible' ) sys.exit() def lowerCamelCase (_SCREAMING_SNAKE_CASE : dict[str, str] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : str ): lexicon.pop(_SCREAMING_SNAKE_CASE ) __a : Tuple = last_match_id if math.loga(_SCREAMING_SNAKE_CASE ).is_integer(): for curr_key in lexicon: __a : List[Any] = '0' + lexicon[curr_key] __a : Optional[Any] = bin(_SCREAMING_SNAKE_CASE )[2:] def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ): __a : List[Any] = {'0': '0', '1': '1'} __a , __a : Union[str, Any] = '', '' __a : Dict = len(_SCREAMING_SNAKE_CASE ) for i in range(len(_SCREAMING_SNAKE_CASE ) ): curr_string += data_bits[i] if curr_string not in lexicon: continue __a : Optional[int] = lexicon[curr_string] result += last_match_id add_key_to_lexicon(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) index += 1 __a : List[Any] = '' while curr_string != "" and curr_string not in lexicon: curr_string += "0" if curr_string != "": __a : List[Any] = lexicon[curr_string] result += last_match_id return result def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ): __a : Any = os.path.getsize(_SCREAMING_SNAKE_CASE ) __a : int = bin(_SCREAMING_SNAKE_CASE )[2:] __a : Dict = len(_SCREAMING_SNAKE_CASE ) return "0" * (length_length - 1) + file_length_binary + compressed def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ): __a : Any = 8 try: with open(_SCREAMING_SNAKE_CASE , 'wb' ) as opened_file: __a : Optional[int] = [ to_write[i : i + byte_length] for i in range(0 , len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) ] if len(result_byte_array[-1] ) % byte_length == 0: result_byte_array.append('10000000' ) else: result_byte_array[-1] += "1" + "0" * ( byte_length - len(result_byte_array[-1] ) - 1 ) for elem in result_byte_array: opened_file.write(int(_SCREAMING_SNAKE_CASE , 2 ).to_bytes(1 , byteorder='big' ) ) except OSError: print('File not accessible' ) sys.exit() def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ): __a : List[str] = read_file_binary(_SCREAMING_SNAKE_CASE ) __a : Tuple = compress_data(_SCREAMING_SNAKE_CASE ) __a : Union[str, Any] = add_file_length(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) write_file_binary(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": compress(sys.argv[1], sys.argv[2])
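The compressor above first turns the input file into one long bit string before building its lexicon; a working sketch of that first step (the listing's accumulator names are mangled):

def file_to_bits(path: str) -> str:
    with open(path, "rb") as f:
        # each byte becomes 8 binary digits, e.g. 0x41 -> "01000001"
        return "".join(f"{byte:08b}" for byte in f.read())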
27
lowerCAmelCase : Optional[Any] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n' lowerCAmelCase : Dict = [{'type': 'code', 'content': INSTALL_CONTENT}] lowerCAmelCase : int = { '{processor_class}': 'FakeProcessorClass', '{model_class}': 'FakeModelClass', '{object_class}': 'FakeObjectClass', }
253
0
'''simple docstring''' import os import sys import warnings from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen from ..table import array_cast from ..utils.file_utils import is_local_path from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: import PIL.Image from .features import FeatureType lowerCAmelCase_ : Optional[List[str]] = None lowerCAmelCase_ : List[Any] = '''<''' if sys.byteorder == '''little''' else '''>''' # Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image lowerCAmelCase_ : str = [ np.dtype('''|b1'''), np.dtype('''|u1'''), np.dtype('''<u2'''), np.dtype('''>u2'''), np.dtype('''<i2'''), np.dtype('''>i2'''), np.dtype('''<u4'''), np.dtype('''>u4'''), np.dtype('''<i4'''), np.dtype('''>i4'''), np.dtype('''<f4'''), np.dtype('''>f4'''), np.dtype('''<f8'''), np.dtype('''>f8'''), ] @dataclass class __lowerCAmelCase : snake_case : bool = True snake_case : Optional[str] = None # Automatically constructed snake_case : ClassVar[str] = "PIL.Image.Image" snake_case : ClassVar[Any] = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} ) snake_case : str = field(default="""Image""" , init=__a , repr=__a ) def __call__(self ): return self.pa_type def snake_case_ (self , lowerCAmelCase__ ): if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCAmelCase : List[Any] = np.array(lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): return {"path": value, "bytes": None} elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): return {"path": None, "bytes": value} elif isinstance(lowerCAmelCase__ , np.ndarray ): # convert the image array to PNG/TIFF bytes return encode_np_array(lowerCAmelCase__ ) elif isinstance(lowerCAmelCase__ , PIL.Image.Image ): # convert the PIL image to bytes (default format is PNG/TIFF) return encode_pil_image(lowerCAmelCase__ ) elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ): # we set "bytes": None to not duplicate the data if they're already available locally return {"bytes": None, "path": value.get("""path""" )} elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None: # store the image bytes, and path is used to infer the image format using the file extension return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )} else: raise ValueError( F"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}." ) def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__=None ): if not self.decode: raise RuntimeError("""Decoding is disabled for this feature. Please use Image(decode=True) instead.""" ) if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support decoding images, please install 'Pillow'.""" ) if token_per_repo_id is None: _UpperCAmelCase : Optional[int] = {} _UpperCAmelCase , _UpperCAmelCase : Dict = value["""path"""], value["""bytes"""] if bytes_ is None: if path is None: raise ValueError(F"An image should have one of 'path' or 'bytes' but both are None in {value}." 
) else: if is_local_path(lowerCAmelCase__ ): _UpperCAmelCase : int = PIL.Image.open(lowerCAmelCase__ ) else: _UpperCAmelCase : Optional[int] = path.split("""::""" )[-1] try: _UpperCAmelCase : str = string_to_dict(lowerCAmelCase__ , config.HUB_DATASETS_URL )["""repo_id"""] _UpperCAmelCase : int = token_per_repo_id.get(lowerCAmelCase__ ) except ValueError: _UpperCAmelCase : int = None with xopen(lowerCAmelCase__ , """rb""" , use_auth_token=lowerCAmelCase__ ) as f: _UpperCAmelCase : Tuple = BytesIO(f.read() ) _UpperCAmelCase : List[Any] = PIL.Image.open(bytes_ ) else: _UpperCAmelCase : Dict = PIL.Image.open(BytesIO(bytes_ ) ) image.load() # to avoid "Too many open files" errors return image def snake_case_ (self ): from .features import Value return ( self if self.decode else { "bytes": Value("""binary""" ), "path": Value("""string""" ), } ) def snake_case_ (self , lowerCAmelCase__ ): if pa.types.is_string(storage.type ): _UpperCAmelCase : Union[str, Any] = pa.array([None] * len(lowerCAmelCase__ ) , type=pa.binary() ) _UpperCAmelCase : int = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): _UpperCAmelCase : Dict = pa.array([None] * len(lowerCAmelCase__ ) , type=pa.string() ) _UpperCAmelCase : int = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index("""bytes""" ) >= 0: _UpperCAmelCase : Union[str, Any] = storage.field("""bytes""" ) else: _UpperCAmelCase : Dict = pa.array([None] * len(lowerCAmelCase__ ) , type=pa.binary() ) if storage.type.get_field_index("""path""" ) >= 0: _UpperCAmelCase : Optional[int] = storage.field("""path""" ) else: _UpperCAmelCase : int = pa.array([None] * len(lowerCAmelCase__ ) , type=pa.string() ) _UpperCAmelCase : int = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() ) elif pa.types.is_list(storage.type ): _UpperCAmelCase : str = pa.array( [encode_np_array(np.array(lowerCAmelCase__ ) )["""bytes"""] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , ) _UpperCAmelCase : Union[str, Any] = pa.array([None] * len(lowerCAmelCase__ ) , type=pa.string() ) _UpperCAmelCase : Any = pa.StructArray.from_arrays( [bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() ) return array_cast(lowerCAmelCase__ , self.pa_type ) def snake_case_ (self , lowerCAmelCase__ ): @no_op_if_value_is_null def path_to_bytes(lowerCAmelCase__ ): with xopen(lowerCAmelCase__ , """rb""" ) as f: _UpperCAmelCase : Dict = f.read() return bytes_ _UpperCAmelCase : List[str] = pa.array( [ (path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) _UpperCAmelCase : Any = pa.array( [os.path.basename(lowerCAmelCase__ ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , ) _UpperCAmelCase : Any = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() ) return array_cast(lowerCAmelCase__ , self.pa_type ) def __A ( ): if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) global _IMAGE_COMPRESSION_FORMATS if _IMAGE_COMPRESSION_FORMATS is None: PIL.Image.init() _UpperCAmelCase : Dict = list(set(PIL.Image.OPEN.keys() 
) & set(PIL.Image.SAVE.keys() ) ) return _IMAGE_COMPRESSION_FORMATS def __A ( lowerCAmelCase_ ): _UpperCAmelCase : str = BytesIO() if image.format in list_image_compression_formats(): _UpperCAmelCase : int = image.format else: _UpperCAmelCase : Optional[int] = """PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF""" image.save(lowerCAmelCase_ , format=lowerCAmelCase_ ) return buffer.getvalue() def __A ( lowerCAmelCase_ ): if hasattr(lowerCAmelCase_ , """filename""" ) and image.filename != "": return {"path": image.filename, "bytes": None} else: return {"path": None, "bytes": image_to_bytes(lowerCAmelCase_ )} def __A ( lowerCAmelCase_ ): if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) _UpperCAmelCase : List[Any] = array.dtype _UpperCAmelCase : Any = dtype.byteorder if dtype.byteorder != """=""" else _NATIVE_BYTEORDER _UpperCAmelCase : Optional[int] = dtype.kind _UpperCAmelCase : Tuple = dtype.itemsize _UpperCAmelCase : Any = None # Multi-channel array case (only np.dtype("|u1") is allowed) if array.shape[2:]: _UpperCAmelCase : List[Any] = np.dtype("""|u1""" ) if dtype_kind not in ["u", "i"]: raise TypeError( f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays." ) if dtype is not dest_dtype: warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'" ) # Exact match elif dtype in _VALID_IMAGE_ARRAY_DTPYES: _UpperCAmelCase : Optional[int] = dtype else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually) while dtype_itemsize >= 1: _UpperCAmelCase : Tuple = dtype_byteorder + dtype_kind + str(lowerCAmelCase_ ) _UpperCAmelCase : Union[str, Any] = np.dtype(lowerCAmelCase_ ) if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES: warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'" ) break else: dtype_itemsize //= 2 if dest_dtype is None: raise TypeError( f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}" ) _UpperCAmelCase : Dict = PIL.Image.fromarray(array.astype(lowerCAmelCase_ ) ) return {"path": None, "bytes": image_to_bytes(lowerCAmelCase_ )} def __A ( lowerCAmelCase_ ): if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) if objs: _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = first_non_null_value(lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs] if isinstance(lowerCAmelCase_ , np.ndarray ): _UpperCAmelCase : Optional[int] = no_op_if_value_is_null(lowerCAmelCase_ ) return [obj_to_image_dict_func(lowerCAmelCase_ ) for obj in objs] elif isinstance(lowerCAmelCase_ , PIL.Image.Image ): _UpperCAmelCase : List[Any] = no_op_if_value_is_null(lowerCAmelCase_ ) return [obj_to_image_dict_func(lowerCAmelCase_ ) for obj in objs] else: return objs else: return objs
170
'''simple docstring''' from typing import List, Optional, Tuple, Union import torch from ...schedulers import DDIMScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class __lowerCAmelCase ( __a ): def __init__(self , lowerCAmelCase__ , lowerCAmelCase__ ): super().__init__() # make sure scheduler can always be converted to DDIM _UpperCAmelCase : Tuple = DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ ) @torch.no_grad() def __call__(self , lowerCAmelCase__ = 1 , lowerCAmelCase__ = None , lowerCAmelCase__ = 0.0 , lowerCAmelCase__ = 5_0 , lowerCAmelCase__ = None , lowerCAmelCase__ = "pil" , lowerCAmelCase__ = True , ): # Sample gaussian noise to begin loop if isinstance(self.unet.config.sample_size , lowerCAmelCase__ ): _UpperCAmelCase : str = ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size, ) else: _UpperCAmelCase : int = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and len(lowerCAmelCase__ ) != batch_size: raise ValueError( F"You have passed a list of generators of length {len(lowerCAmelCase__ )}, but requested an effective batch" F" size of {batch_size}. Make sure the batch size matches the length of the generators." ) _UpperCAmelCase : Optional[Any] = randn_tensor(lowerCAmelCase__ , generator=lowerCAmelCase__ , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(lowerCAmelCase__ ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output _UpperCAmelCase : str = self.unet(lowerCAmelCase__ , lowerCAmelCase__ ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 _UpperCAmelCase : List[str] = self.scheduler.step( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , eta=lowerCAmelCase__ , use_clipped_model_output=lowerCAmelCase__ , generator=lowerCAmelCase__ ).prev_sample _UpperCAmelCase : Optional[int] = (image / 2 + 0.5).clamp(0 , 1 ) _UpperCAmelCase : str = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": _UpperCAmelCase : str = self.numpy_to_pil(lowerCAmelCase__ ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowerCAmelCase__ )
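The class above is a DDIM sampling pipeline; assuming the usual diffusers API, typical use looks like this (the checkpoint name is a common public one, treat it as an assumption):

from diffusers import DDIMPipeline

pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
# eta=0.0 gives deterministic DDIM sampling with far fewer steps than DDPM
image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]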
170
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE_ = { '''configuration_bigbird_pegasus''': [ '''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BigBirdPegasusConfig''', '''BigBirdPegasusOnnxConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_ = [ '''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BigBirdPegasusForCausalLM''', '''BigBirdPegasusForConditionalGeneration''', '''BigBirdPegasusForQuestionAnswering''', '''BigBirdPegasusForSequenceClassification''', '''BigBirdPegasusModel''', '''BigBirdPegasusPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP, BigBirdPegasusConfig, BigBirdPegasusOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST, BigBirdPegasusForCausalLM, BigBirdPegasusForConditionalGeneration, BigBirdPegasusForQuestionAnswering, BigBirdPegasusForSequenceClassification, BigBirdPegasusModel, BigBirdPegasusPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
301
"""simple docstring""" import os from distutils.util import strtobool def lowercase (_lowerCAmelCase , _lowerCAmelCase ): for e in env_keys: __lowerCAmelCase = int(os.environ.get(_lowerCAmelCase , -1 ) ) if val >= 0: return val return default def lowercase (_lowerCAmelCase , _lowerCAmelCase=False ): __lowerCAmelCase = os.environ.get(_lowerCAmelCase , str(_lowerCAmelCase ) ) return strtobool(_lowerCAmelCase ) == 1 # As its name indicates `strtobool` actually returns an int... def lowercase (_lowerCAmelCase , _lowerCAmelCase="no" ): __lowerCAmelCase = os.environ.get(_lowerCAmelCase , str(_lowerCAmelCase ) ) return value
301
1
"""simple docstring""" import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class __SCREAMING_SNAKE_CASE ( UpperCamelCase ): def _UpperCamelCase ( self : Union[str, Any] ): '''simple docstring''' A__ : Tuple = tempfile.mkdtemp() A__ : List[Any] = 8 # DPR tok A__ : str = [ """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] A__ : Union[str, Any] = os.path.join(self.tmpdirname , """dpr_tokenizer""" ) os.makedirs(snake_case , exist_ok=snake_case ) A__ : str = os.path.join(snake_case , DPR_VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) # BART tok A__ : Optional[int] = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] A__ : Tuple = dict(zip(snake_case , range(len(snake_case ) ) ) ) A__ : Optional[Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] A__ : Any = {"""unk_token""": """<unk>"""} A__ : List[Any] = os.path.join(self.tmpdirname , """bart_tokenizer""" ) os.makedirs(snake_case , exist_ok=snake_case ) A__ : List[Any] = os.path.join(snake_case , BART_VOCAB_FILES_NAMES["""vocab_file"""] ) A__ : List[Any] = os.path.join(snake_case , BART_VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(snake_case ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(snake_case ) ) def _UpperCamelCase ( self : Dict ): '''simple docstring''' return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) ) def _UpperCamelCase ( self : List[str] ): '''simple docstring''' return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) ) def _UpperCamelCase ( self : int ): '''simple docstring''' return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , """bart_tokenizer""" ) ) def _UpperCamelCase ( self : int ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def _UpperCamelCase ( self : Dict ): '''simple docstring''' A__ : List[str] = Dataset.from_dict( { """id""": ["""0""", """1"""], """text""": ["""foo""", 
"""bar"""], """title""": ["""Foo""", """Bar"""], """embeddings""": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )], } ) dataset.add_faiss_index("""embeddings""" , string_factory="""Flat""" , metric_type=faiss.METRIC_INNER_PRODUCT ) return dataset def _UpperCamelCase ( self : Dict ): '''simple docstring''' A__ : Any = self.get_dummy_dataset() A__ : int = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , ) with patch("""transformers.models.rag.retrieval_rag.load_dataset""" ) as mock_load_dataset: A__ : Any = dataset A__ : Any = RagRetriever( snake_case , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) return retriever def _UpperCamelCase ( self : List[str] , snake_case : bool ): '''simple docstring''' A__ : Dict = self.get_dummy_dataset() A__ : Union[str, Any] = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="""custom""" , ) if from_disk: A__ : Any = os.path.join(self.tmpdirname , """dataset""" ) A__ : Tuple = os.path.join(self.tmpdirname , """index.faiss""" ) dataset.get_index("""embeddings""" ).save(os.path.join(self.tmpdirname , """index.faiss""" ) ) dataset.drop_index("""embeddings""" ) dataset.save_to_disk(os.path.join(self.tmpdirname , """dataset""" ) ) del dataset A__ : Tuple = RagRetriever( snake_case , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) else: A__ : int = RagRetriever( snake_case , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , snake_case ) , ) return retriever def _UpperCamelCase ( self : str ): '''simple docstring''' A__ : Optional[Any] = Dataset.from_dict( { """id""": ["""0""", """1"""], """text""": ["""foo""", """bar"""], """title""": ["""Foo""", """Bar"""], """embeddings""": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )], } ) dataset.add_faiss_index("""embeddings""" , string_factory="""Flat""" , metric_type=faiss.METRIC_INNER_PRODUCT ) A__ : List[str] = os.path.join(self.tmpdirname , """hf_bert_base.hnswSQ8_correct_phi_128.c_index""" ) dataset.save_faiss_index("""embeddings""" , index_file_name + """.index.dpr""" ) pickle.dump(dataset["""id"""] , open(index_file_name + """.index_meta.dpr""" , """wb""" ) ) A__ : int = os.path.join(self.tmpdirname , """psgs_w100.tsv.pkl""" ) A__ : Dict = {sample["""id"""]: [sample["""text"""], sample["""title"""]] for sample in dataset} pickle.dump(snake_case , open(snake_case , """wb""" ) ) A__ : Optional[int] = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="""legacy""" , index_path=self.tmpdirname , ) A__ : List[Any] = RagRetriever( snake_case , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() ) return retriever def _UpperCamelCase ( self : Union[str, Any] ): '''simple docstring''' A__ : Dict = 1 A__ : str = self.get_dummy_canonical_hf_index_retriever() A__ : Tuple = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) A__ : Optional[Any] = retriever.retrieve(snake_case , n_docs=snake_case ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, 
self.retrieval_vector_size) ) self.assertEqual(len(snake_case ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["""embeddings""", """id""", """text""", """title"""] ) self.assertEqual(len(doc_dicts[0]["""id"""] ) , snake_case ) self.assertEqual(doc_dicts[0]["""id"""][0] , """1""" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["""id"""][0] , """0""" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _UpperCamelCase ( self : Optional[Any] ): '''simple docstring''' A__ : List[str] = self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with patch("""transformers.models.rag.retrieval_rag.load_dataset""" ) as mock_load_dataset: A__ : Dict = self.get_dummy_dataset() retriever.save_pretrained(snake_case ) A__ : List[str] = RagRetriever.from_pretrained(snake_case ) self.assertIsInstance(snake_case , snake_case ) A__ : Optional[int] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) A__ : Union[str, Any] = retriever.retrieve(snake_case , n_docs=1 ) self.assertTrue(out is not None ) def _UpperCamelCase ( self : Optional[int] ): '''simple docstring''' A__ : Union[str, Any] = 1 A__ : int = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case ) A__ : List[Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) A__ : Optional[int] = retriever.retrieve(snake_case , n_docs=snake_case ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(snake_case ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["""embeddings""", """id""", """text""", """title"""] ) self.assertEqual(len(doc_dicts[0]["""id"""] ) , snake_case ) self.assertEqual(doc_dicts[0]["""id"""][0] , """1""" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["""id"""][0] , """0""" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _UpperCamelCase ( self : Dict ): '''simple docstring''' A__ : Union[str, Any] = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(snake_case ) A__ : Dict = RagRetriever.from_pretrained(snake_case ) self.assertIsInstance(snake_case , snake_case ) A__ : List[Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) A__ : List[Any] = retriever.retrieve(snake_case , n_docs=1 ) self.assertTrue(out is not None ) def _UpperCamelCase ( self : Any ): '''simple docstring''' A__ : Tuple = 1 A__ : Dict = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case ) A__ : Any = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) A__ : List[Any] = retriever.retrieve(snake_case , n_docs=snake_case ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(snake_case ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["""embeddings""", """id""", """text""", """title"""] ) self.assertEqual(len(doc_dicts[0]["""id"""] ) , snake_case ) self.assertEqual(doc_dicts[0]["""id"""][0] , """1""" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["""id"""][0] , """0""" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def 
_UpperCamelCase ( self : Union[str, Any] ): '''simple docstring''' A__ : int = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(snake_case ) A__ : Optional[int] = RagRetriever.from_pretrained(snake_case ) self.assertIsInstance(snake_case , snake_case ) A__ : Tuple = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) A__ : Any = retriever.retrieve(snake_case , n_docs=1 ) self.assertTrue(out is not None ) def _UpperCamelCase ( self : List[Any] ): '''simple docstring''' A__ : List[str] = 1 A__ : Tuple = self.get_dummy_legacy_index_retriever() A__ : Optional[int] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) A__ : int = retriever.retrieve(snake_case , n_docs=snake_case ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(snake_case ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["""text""", """title"""] ) self.assertEqual(len(doc_dicts[0]["""text"""] ) , snake_case ) self.assertEqual(doc_dicts[0]["""text"""][0] , """bar""" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["""text"""][0] , """foo""" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _UpperCamelCase ( self : int ): '''simple docstring''' A__ : List[str] = self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(snake_case ) A__ : Union[str, Any] = RagRetriever.from_pretrained(snake_case ) self.assertIsInstance(snake_case , snake_case ) A__ : Dict = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) A__ : List[str] = retriever.retrieve(snake_case , n_docs=1 ) self.assertTrue(out is not None ) @require_torch @require_tokenizers @require_sentencepiece def _UpperCamelCase ( self : Union[str, Any] ): '''simple docstring''' import torch A__ : Tuple = 1 A__ : Union[str, Any] = self.get_dummy_canonical_hf_index_retriever() A__ : Union[str, Any] = [[5, 7], [10, 11]] A__ : Optional[Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) A__ : Optional[Any] = retriever(snake_case , snake_case , prefix=retriever.config.generator.prefix , n_docs=snake_case ) A__ : List[str] = ( out["""context_input_ids"""], out["""context_attention_mask"""], out["""retrieved_doc_embeds"""], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(snake_case , snake_case ) self.assertIsInstance(snake_case , snake_case ) self.assertIsInstance(snake_case , np.ndarray ) A__ : Union[str, Any] = retriever( snake_case , snake_case , prefix=retriever.config.generator.prefix , n_docs=snake_case , return_tensors="""pt""" , ) A__ : List[Any] = ( # noqa: F841 out["""context_input_ids"""], out["""context_attention_mask"""], out["""retrieved_doc_embeds"""], out["""doc_ids"""], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(snake_case , torch.Tensor ) self.assertIsInstance(snake_case , torch.Tensor ) self.assertIsInstance(snake_case , torch.Tensor ) @require_torch @require_tokenizers @require_sentencepiece def _UpperCamelCase ( self : Dict ): '''simple docstring''' A__ : Any = self.get_dpr_ctx_encoder_tokenizer() A__ : str = 1 
A__ : Union[str, Any] = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case ) retriever.set_ctx_encoder_tokenizer(snake_case ) A__ : List[str] = [[5, 7], [10, 11]] A__ : Dict = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) A__ : Dict = retriever(snake_case , snake_case , prefix=retriever.config.generator.prefix , n_docs=snake_case ) self.assertEqual( len(snake_case ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ("""tokenized_doc_ids""", """tokenized_doc_attention_mask""") ) , snake_case ) # check for doc token related keys in dictionary.
369
"""simple docstring""" import numpy as np class __SCREAMING_SNAKE_CASE : def __init__( self : List[str] ): '''simple docstring''' A__ : Optional[int] = (0, 0) A__ : Dict = None A__ : int = 0 A__ : str = 0 A__ : Optional[Any] = 0 def __eq__( self : str , snake_case : Optional[int] ): '''simple docstring''' return self.position == cell.position def _UpperCamelCase ( self : List[str] ): '''simple docstring''' print(self.position ) class __SCREAMING_SNAKE_CASE : def __init__( self : int , snake_case : Any=(5, 5) ): '''simple docstring''' A__ : Optional[int] = np.zeros(snake_case ) A__ : List[Any] = world_size[0] A__ : Dict = world_size[1] def _UpperCamelCase ( self : Any ): '''simple docstring''' print(self.w ) def _UpperCamelCase ( self : Optional[int] , snake_case : List[Any] ): '''simple docstring''' A__ : int = [ (-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1), ] A__ : int = cell.position[0] A__ : str = cell.position[1] A__ : Any = [] for n in neughbour_cord: A__ : List[Any] = current_x + n[0] A__ : Tuple = current_y + n[1] if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit: A__ : List[Any] = Cell() A__ : str = (x, y) A__ : Optional[Any] = cell neighbours.append(snake_case ) return neighbours def _lowerCAmelCase ( UpperCAmelCase__ : List[str], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Dict ) ->Dict: A__ : Union[str, Any] = [] A__ : Optional[int] = [] _open.append(UpperCAmelCase__ ) while _open: A__ : List[Any] = np.argmin([n.f for n in _open] ) A__ : Union[str, Any] = _open[min_f] _closed.append(_open.pop(UpperCAmelCase__ ) ) if current == goal: break for n in world.get_neigbours(UpperCAmelCase__ ): for c in _closed: if c == n: continue A__ : Dict = current.g + 1 A__ , A__ : int = n.position A__ , A__ : Optional[int] = goal.position A__ : Union[str, Any] = (ya - ya) ** 2 + (xa - xa) ** 2 A__ : Optional[int] = n.h + n.g for c in _open: if c == n and c.f < n.f: continue _open.append(UpperCAmelCase__ ) A__ : List[str] = [] while current.parent is not None: path.append(current.position ) A__ : Union[str, Any] = current.parent path.append(current.position ) return path[::-1] if __name__ == "__main__": A_ = Gridworld() # Start position and goal A_ = Cell() A_ = (0, 0) A_ = Cell() A_ = (4, 4) print(F'path from {start.position} to {goal.position}') A_ = astar(world, start, goal) # Just for visual reasons. for i in s: A_ = 1 print(world.w)
296
0
from __future__ import annotations from collections.abc import Iterator class _SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__(self : Tuple , UpperCAmelCase_ : int) ->Optional[Any]: '''simple docstring''' lowerCamelCase__: List[str] =value lowerCamelCase__: Node | None =None lowerCamelCase__: Node | None =None class _SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__(self : Optional[int] , UpperCAmelCase_ : Node) ->Any: '''simple docstring''' lowerCamelCase__: List[str] =tree def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : Node | None) ->int: '''simple docstring''' if node is None: return 0 return node.value + ( self.depth_first_search(node.left) + self.depth_first_search(node.right) ) def __iter__(self : int) ->List[str]: '''simple docstring''' yield self.depth_first_search(self.tree) if __name__ == "__main__": import doctest doctest.testmod()
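The `depth_first_search` above just sums node values recursively; both classes in the listing share the same mangled name, so here is a self-contained restatement with a tiny check (names are illustrative):

from dataclasses import dataclass
from typing import Optional

@dataclass
class Node:
    value: int
    left: "Optional[Node]" = None
    right: "Optional[Node]" = None

def subtree_sum(node: Optional[Node]) -> int:
    # value of this node plus the sums of both subtrees (None contributes 0)
    if node is None:
        return 0
    return node.value + subtree_sum(node.left) + subtree_sum(node.right)

assert subtree_sum(Node(1, Node(2), Node(3))) == 6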
10
"""simple docstring""" from __future__ import annotations def snake_case_ ( A_ : str ): '''simple docstring''' return [ord(A_ ) - 96 for elem in plain] def snake_case_ ( A_ : list[int] ): '''simple docstring''' return "".join(chr(elem + 96 ) for elem in encoded ) def snake_case_ ( ): '''simple docstring''' _lowerCamelCase : Dict = encode(input('''-> ''' ).strip().lower() ) print('''Encoded: ''', A_ ) print('''Decoded:''', decode(A_ ) ) if __name__ == "__main__": main()
72
0
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : Optional[int] = logging.get_logger(__name__) _lowerCAmelCase : str = { "microsoft/unispeech-sat-base-100h-libri-ft": ( "https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json" ), # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat } class __magic_name__ ( lowerCAmelCase_ ): SCREAMING_SNAKE_CASE = 'unispeech-sat' def __init__( self , __snake_case=32 , __snake_case=768 , __snake_case=12 , __snake_case=12 , __snake_case=3072 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=0.1 , __snake_case=0.0 , __snake_case=0.0 , __snake_case=0.1 , __snake_case=0.1 , __snake_case=0.02 , __snake_case=1e-5 , __snake_case="group" , __snake_case="gelu" , __snake_case=(512, 512, 512, 512, 512, 512, 512) , __snake_case=(5, 2, 2, 2, 2, 2, 2) , __snake_case=(10, 3, 3, 3, 3, 2, 2) , __snake_case=False , __snake_case=128 , __snake_case=16 , __snake_case=False , __snake_case=True , __snake_case=0.05 , __snake_case=10 , __snake_case=2 , __snake_case=0.0 , __snake_case=10 , __snake_case=0 , __snake_case=320 , __snake_case=2 , __snake_case=0.1 , __snake_case=100 , __snake_case=256 , __snake_case=256 , __snake_case=0.1 , __snake_case="mean" , __snake_case=False , __snake_case=False , __snake_case=256 , __snake_case=(512, 512, 512, 512, 1500) , __snake_case=(5, 3, 3, 1, 1) , __snake_case=(1, 2, 3, 1, 1) , __snake_case=512 , __snake_case=0 , __snake_case=1 , __snake_case=2 , __snake_case=504 , **__snake_case , ) -> Optional[Any]: '''simple docstring''' super().__init__(**__snake_case , pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case ) __a =hidden_size __a =feat_extract_norm __a =feat_extract_activation __a =list(__snake_case ) __a =list(__snake_case ) __a =list(__snake_case ) __a =conv_bias __a =num_conv_pos_embeddings __a =num_conv_pos_embedding_groups __a =len(self.conv_dim ) __a =num_hidden_layers __a =intermediate_size __a =hidden_act __a =num_attention_heads __a =hidden_dropout __a =attention_dropout __a =activation_dropout __a =feat_proj_dropout __a =final_dropout __a =layerdrop __a =layer_norm_eps __a =initializer_range __a =vocab_size __a =num_clusters __a =do_stable_layer_norm __a =use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==' ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =' f' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,' f' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __a =apply_spec_augment __a =mask_time_prob __a =mask_time_length __a =mask_time_min_masks __a =mask_feature_prob __a =mask_feature_length __a =mask_feature_min_masks # parameters for pretraining with codevector quantized representations __a =num_codevectors_per_group __a =num_codevector_groups __a =contrastive_logits_temperature __a =feat_quantizer_dropout __a =num_negatives __a =codevector_dim __a =proj_codevector_dim __a =diversity_loss_weight # ctc loss __a =ctc_loss_reduction __a =ctc_zero_infinity # SequenceClassification-specific parameter. Feel free to ignore for other classes. __a =classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. __a =list(__snake_case ) __a =list(__snake_case ) __a =list(__snake_case ) __a =xvector_output_dim @property def __magic_name__ ( self ) -> List[str]: '''simple docstring''' return functools.reduce(operator.mul , self.conv_stride , 1 )
308
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available _lowerCAmelCase : int = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Dict = ["BartphoTokenizer"] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bartpho import BartphoTokenizer else: import sys _lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
308
1
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class A ( UpperCAmelCase_ ): __UpperCAmelCase : List[str] = ( 'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.' 'It takes two arguments named `image` which should be the original image, and `label` which should be a text ' 'describing the elements what should be identified in the segmentation mask. The tool returns the mask.' ) __UpperCAmelCase : Tuple = 'CIDAS/clipseg-rd64-refined' __UpperCAmelCase : str = 'image_segmenter' __UpperCAmelCase : Any = CLIPSegForImageSegmentation __UpperCAmelCase : str = ['image', 'text'] __UpperCAmelCase : Tuple = ['image'] def __init__(self : Union[str, Any] , *__UpperCAmelCase : int , **__UpperCAmelCase : Dict ) -> Optional[int]: """simple docstring""" requires_backends(self , ["vision"] ) super().__init__(*__UpperCAmelCase , **__UpperCAmelCase ) def lowercase_ (self : List[str] , __UpperCAmelCase : "Image" , __UpperCAmelCase : str ) -> Tuple: """simple docstring""" return self.pre_processor(text=[label] , images=[image] , padding=__UpperCAmelCase , return_tensors="pt" ) def lowercase_ (self : Optional[int] , __UpperCAmelCase : str ) -> int: """simple docstring""" with torch.no_grad(): UpperCAmelCase__ = self.model(**__UpperCAmelCase ).logits return logits def lowercase_ (self : Union[str, Any] , __UpperCAmelCase : int ) -> int: """simple docstring""" UpperCAmelCase__ = outputs.cpu().detach().numpy() UpperCAmelCase__ = 0 UpperCAmelCase__ = 1 return Image.fromarray((array * 2_5_5).astype(np.uinta ) )
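The tool above is a thin wrapper over transformers' CLIPSeg; assuming the standard CLIPSeg API, the underlying call it performs is roughly the following (threshold-at-zero matches the decode step above):

import torch
from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

def segment(image, label: str):
    inputs = processor(text=[label], images=[image], padding=True, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    # binarize the logits into a 0/255 mask, as the tool's decode step does
    return (logits.cpu().numpy() > 0).astype("uint8") * 255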
65
"""simple docstring""" import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() ) def lowercase ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' _UpperCAmelCase = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue _UpperCAmelCase = key.replace('''heads.cmd.mim_head.cls.predictions''' , '''mmm_image_head''' ) _UpperCAmelCase = key.replace('''heads.cmd.mlm_head.cls.predictions''' , '''mmm_text_head''' ) _UpperCAmelCase = key.replace('''heads.cmd.itm_head.cls''' , '''itm_head''' ) _UpperCAmelCase = key.replace('''heads.cmd.itm_head.pooler''' , '''itm_head.pooler''' ) _UpperCAmelCase = key.replace('''heads.cmd.clip_head.logit_scale''' , '''flava.logit_scale''' ) _UpperCAmelCase = key.replace('''heads.fairseq_mlm.cls.predictions''' , '''mlm_head''' ) _UpperCAmelCase = key.replace('''heads.imagenet.mim_head.cls.predictions''' , '''mim_head''' ) _UpperCAmelCase = key.replace('''mm_text_projection''' , '''flava.text_to_mm_projection''' ) _UpperCAmelCase = key.replace('''mm_image_projection''' , '''flava.image_to_mm_projection''' ) _UpperCAmelCase = key.replace('''image_encoder.module''' , '''flava.image_model''' ) _UpperCAmelCase = key.replace('''text_encoder.module''' , '''flava.text_model''' ) _UpperCAmelCase = key.replace('''mm_encoder.module.encoder.cls_token''' , '''flava.multimodal_model.cls_token''' ) _UpperCAmelCase = key.replace('''mm_encoder.module''' , '''flava.multimodal_model''' ) _UpperCAmelCase = key.replace('''text_projection''' , '''flava.text_projection''' ) _UpperCAmelCase = key.replace('''image_projection''' , '''flava.image_projection''' ) _UpperCAmelCase = value.float() for key, value in codebook_state_dict.items(): _UpperCAmelCase = value return upgrade @torch.no_grad() def lowercase ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[int]=None ): '''simple docstring''' if config_path is not None: _UpperCAmelCase = FlavaConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) else: _UpperCAmelCase = FlavaConfig() _UpperCAmelCase = FlavaForPreTraining(_SCREAMING_SNAKE_CASE ).eval() _UpperCAmelCase = convert_dalle_checkpoint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , save_checkpoint=_SCREAMING_SNAKE_CASE ) if os.path.exists(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = torch.load(_SCREAMING_SNAKE_CASE , map_location='''cpu''' ) else: _UpperCAmelCase = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location='''cpu''' ) _UpperCAmelCase = upgrade_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) hf_model.load_state_dict(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = hf_model.state_dict() _UpperCAmelCase = count_parameters(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = count_parameters(_SCREAMING_SNAKE_CASE ) + count_parameters(_SCREAMING_SNAKE_CASE ) assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ) hf_model.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __A : Dict = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output 
PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint") parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") __A : Optional[Any] = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
260
0
import math


class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
62
import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class _SCREAMING_SNAKE_CASE ( _a ): snake_case__ : Union[str, Any] = """char""" snake_case__ : Optional[int] = """bpe""" snake_case__ : Dict = """wp""" UpperCAmelCase_ : List[Any] = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class _SCREAMING_SNAKE_CASE ( _a ): snake_case__ : List[Any] = ["""image_processor""", """char_tokenizer"""] snake_case__ : Dict = """ViTImageProcessor""" snake_case__ : List[str] = """MgpstrTokenizer""" def __init__( self : Optional[int] , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Dict=None , **__lowerCamelCase : Any ): UpperCamelCase :Optional[Any] = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , __lowerCamelCase , ) UpperCamelCase :Optional[int] = kwargs.pop("""feature_extractor""" ) UpperCamelCase :List[str] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) UpperCamelCase :Optional[int] = tokenizer UpperCamelCase :int = AutoTokenizer.from_pretrained("""gpt2""" ) UpperCamelCase :int = AutoTokenizer.from_pretrained("""bert-base-uncased""" ) super().__init__(__lowerCamelCase , __lowerCamelCase ) def __call__( self : str , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Dict=None , __lowerCamelCase : str=None , **__lowerCamelCase : Dict ): if images is None and text is None: raise ValueError("""You need to specify either an `images` or `text` input to process.""" ) if images is not None: UpperCamelCase :Tuple = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase ) if text is not None: UpperCamelCase :Any = self.char_tokenizer(__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase ) if text is None: return inputs elif images is None: return encodings else: UpperCamelCase :Dict = encodings["""input_ids"""] return inputs def _A ( self : Tuple , __lowerCamelCase : str ): UpperCamelCase , UpperCamelCase , UpperCamelCase :int = sequences UpperCamelCase :Tuple = char_preds.size(0 ) UpperCamelCase , UpperCamelCase :str = self._decode_helper(__lowerCamelCase , """char""" ) UpperCamelCase , UpperCamelCase :List[Any] = self._decode_helper(__lowerCamelCase , """bpe""" ) UpperCamelCase , UpperCamelCase :List[Any] = self._decode_helper(__lowerCamelCase , """wp""" ) UpperCamelCase :Any = [] UpperCamelCase :str = [] for i in range(__lowerCamelCase ): UpperCamelCase :Union[str, Any] = [char_scores[i], bpe_scores[i], wp_scores[i]] UpperCamelCase :Any = [char_strs[i], bpe_strs[i], wp_strs[i]] UpperCamelCase :str = scores.index(max(__lowerCamelCase ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) UpperCamelCase :Optional[Any] = {} UpperCamelCase :Dict = final_strs UpperCamelCase :Union[str, Any] = final_scores UpperCamelCase :List[str] = char_strs UpperCamelCase :Tuple = bpe_strs UpperCamelCase :Optional[Any] = wp_strs return out def _A ( self : int , __lowerCamelCase : List[Any] , __lowerCamelCase : List[str] ): if format == DecodeType.CHARACTER: UpperCamelCase :List[str] = 
self.char_decode UpperCamelCase :Union[str, Any] = 1 UpperCamelCase :Optional[Any] = """[s]""" elif format == DecodeType.BPE: UpperCamelCase :Union[str, Any] = self.bpe_decode UpperCamelCase :str = 2 UpperCamelCase :int = """#""" elif format == DecodeType.WORDPIECE: UpperCamelCase :int = self.wp_decode UpperCamelCase :Any = 102 UpperCamelCase :int = """[SEP]""" else: raise ValueError(F"""Format {format} is not supported.""" ) UpperCamelCase , UpperCamelCase :int = [], [] UpperCamelCase :Any = pred_logits.size(0 ) UpperCamelCase :List[Any] = pred_logits.size(1 ) UpperCamelCase , UpperCamelCase :Optional[int] = pred_logits.topk(1 , dim=-1 , largest=__lowerCamelCase , sorted=__lowerCamelCase ) UpperCamelCase :Optional[Any] = preds_index.view(-1 , __lowerCamelCase )[:, 1:] UpperCamelCase :int = decoder(__lowerCamelCase ) UpperCamelCase , UpperCamelCase :Optional[int] = torch.nn.functional.softmax(__lowerCamelCase , dim=2 ).max(dim=2 ) UpperCamelCase :Tuple = preds_max_prob[:, 1:] for index in range(__lowerCamelCase ): UpperCamelCase :Tuple = preds_str[index].find(__lowerCamelCase ) UpperCamelCase :List[Any] = preds_str[index][:pred_eos] UpperCamelCase :List[Any] = preds_index[index].cpu().tolist() UpperCamelCase :Optional[Any] = pred_index.index(__lowerCamelCase ) if eos_token in pred_index else -1 UpperCamelCase :List[str] = preds_max_prob[index][: pred_eos_index + 1] UpperCamelCase :List[str] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(__lowerCamelCase ) conf_scores.append(__lowerCamelCase ) return dec_strs, conf_scores def _A ( self : Optional[Any] , __lowerCamelCase : str ): UpperCamelCase :Dict = [seq.replace(""" """ , """""" ) for seq in self.char_tokenizer.batch_decode(__lowerCamelCase )] return decode_strs def _A ( self : Union[str, Any] , __lowerCamelCase : str ): return self.bpe_tokenizer.batch_decode(__lowerCamelCase ) def _A ( self : int , __lowerCamelCase : Optional[int] ): UpperCamelCase :Any = [seq.replace(""" """ , """""" ) for seq in self.wp_tokenizer.batch_decode(__lowerCamelCase )] return decode_strs
62
1
import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class snake_case__ : """simple docstring""" def __init__( self , __lowercase , __lowercase=1_3 , __lowercase=7 , __lowercase=True , __lowercase=True , __lowercase=9_9 , __lowercase=3_2 , __lowercase=5 , __lowercase=4 , __lowercase=3_7 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=5_0 , __lowercase=0.0_2 , __lowercase=True , __lowercase=None , ) -> Any: """simple docstring""" a__ : List[Any] = parent a__ : Optional[int] = batch_size a__ : Dict = seq_length a__ : Dict = is_training a__ : Union[str, Any] = use_input_mask a__ : List[Any] = vocab_size a__ : str = hidden_size a__ : List[Any] = num_hidden_layers a__ : Optional[int] = num_attention_heads a__ : int = intermediate_size a__ : int = hidden_act a__ : List[str] = hidden_dropout_prob a__ : Dict = attention_probs_dropout_prob a__ : int = max_position_embeddings a__ : Optional[Any] = initializer_range a__ : List[Any] = use_labels a__ : Optional[int] = scope def SCREAMING_SNAKE_CASE__( self ) -> List[str]: """simple docstring""" a__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a__ : Any = None if self.use_input_mask: a__ : int = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: a__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a__ : Dict = self.get_config() return config, input_ids, input_mask, token_labels def SCREAMING_SNAKE_CASE__( self ) -> Optional[Any]: """simple docstring""" return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=__lowercase , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE__( self ) -> Any: """simple docstring""" ( ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ) : int = self.prepare_config_and_inputs() a__ : str = True a__ : Dict = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) a__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase , __lowercase , **__lowercase , ) -> List[Any]: """simple docstring""" a__ : List[Any] = BertGenerationEncoder(config=__lowercase ) model.to(__lowercase ) model.eval() a__ : List[Any] = model(__lowercase , attention_mask=__lowercase ) a__ : Dict = model(__lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , **__lowercase , ) -> List[Any]: """simple 
docstring""" a__ : int = True a__ : str = BertGenerationEncoder(config=__lowercase ) model.to(__lowercase ) model.eval() a__ : Any = model( __lowercase , attention_mask=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , ) a__ : Optional[Any] = model( __lowercase , attention_mask=__lowercase , encoder_hidden_states=__lowercase , ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , **__lowercase , ) -> Optional[Any]: """simple docstring""" a__ : Optional[Any] = True a__ : Optional[Any] = True a__ : List[Any] = BertGenerationDecoder(config=__lowercase ).to(__lowercase ).eval() # first forward pass a__ : Any = model( __lowercase , attention_mask=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , use_cache=__lowercase , ) a__ : List[Any] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids a__ : str = ids_tensor((self.batch_size, 3) , config.vocab_size ) a__ : Dict = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and a__ : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 ) a__ : List[str] = torch.cat([input_mask, next_mask] , dim=-1 ) a__ : Dict = model( __lowercase , attention_mask=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , output_hidden_states=__lowercase , )["""hidden_states"""][0] a__ : Union[str, Any] = model( __lowercase , attention_mask=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , past_key_values=__lowercase , output_hidden_states=__lowercase , )["""hidden_states"""][0] # select random slice a__ : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item() a__ : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx].detach() a__ : int = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__lowercase , __lowercase , atol=1E-3 ) ) def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase , __lowercase , *__lowercase , ) -> Dict: """simple docstring""" a__ : str = BertGenerationDecoder(__lowercase ) model.to(__lowercase ) model.eval() a__ : int = model(__lowercase , attention_mask=__lowercase , labels=__lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE__( self ) -> Dict: """simple docstring""" a__ , a__ , a__ , a__ : Optional[Any] = self.prepare_config_and_inputs() a__ : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class snake_case__ (A__ , A__ , A__ , unittest.TestCase ): """simple docstring""" __lowerCAmelCase :List[Any] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () __lowerCAmelCase :str = (BertGenerationDecoder,) if is_torch_available() else () __lowerCAmelCase :int = ( {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder} if is_torch_available() else {} ) def SCREAMING_SNAKE_CASE__( self ) -> Optional[int]: """simple docstring""" a__ : int = BertGenerationEncoderTester(self ) a__ : int = ConfigTester(self , config_class=__lowercase , hidden_size=3_7 ) def SCREAMING_SNAKE_CASE__( 
self ) -> Union[str, Any]: """simple docstring""" self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE__( self ) -> List[str]: """simple docstring""" a__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowercase ) def SCREAMING_SNAKE_CASE__( self ) -> Optional[int]: """simple docstring""" a__ , a__ , a__ , a__ : Dict = self.model_tester.prepare_config_and_inputs() a__ : List[str] = """bert""" self.model_tester.create_and_check_model(__lowercase , __lowercase , __lowercase , __lowercase ) def SCREAMING_SNAKE_CASE__( self ) -> Dict: """simple docstring""" a__ : Dict = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*__lowercase ) def SCREAMING_SNAKE_CASE__( self ) -> List[Any]: """simple docstring""" a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*__lowercase ) def SCREAMING_SNAKE_CASE__( self ) -> Any: """simple docstring""" ( ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ) : Dict = self.model_tester.prepare_config_and_inputs_for_decoder() a__ : Tuple = None self.model_tester.create_and_check_model_as_decoder( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) def SCREAMING_SNAKE_CASE__( self ) -> int: """simple docstring""" a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*__lowercase ) @slow def SCREAMING_SNAKE_CASE__( self ) -> List[Any]: """simple docstring""" a__ : Union[str, Any] = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" ) self.assertIsNotNone(__lowercase ) @require_torch class snake_case__ (unittest.TestCase ): """simple docstring""" @slow def SCREAMING_SNAKE_CASE__( self ) -> Union[str, Any]: """simple docstring""" a__ : str = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" ) a__ : List[str] = torch.tensor([[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_2_6, 3_8_9_9, 2_0_0_3, 1_0_1_4_0, 1_0_2]] ) with torch.no_grad(): a__ : Dict = model(__lowercase )[0] a__ : Optional[Any] = torch.Size([1, 8, 1_0_2_4] ) self.assertEqual(output.shape , __lowercase ) a__ : List[str] = torch.tensor( [[[0.1_7_7_5, 0.0_0_8_3, -0.0_3_2_1], [1.6_0_0_2, 0.1_2_8_7, 0.3_9_1_2], [2.1_4_7_3, 0.5_7_9_1, 0.6_0_6_6]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __lowercase , atol=1E-4 ) ) @require_torch class snake_case__ (unittest.TestCase ): """simple docstring""" @slow def SCREAMING_SNAKE_CASE__( self ) -> Tuple: """simple docstring""" a__ : Tuple = BertGenerationDecoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" ) a__ : int = torch.tensor([[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_2_6, 3_8_9_9, 2_0_0_3, 1_0_1_4_0, 1_0_2]] ) with torch.no_grad(): a__ : Optional[int] = model(__lowercase )[0] a__ : str = torch.Size([1, 8, 5_0_3_5_8] ) self.assertEqual(output.shape , __lowercase ) a__ : List[str] = torch.tensor( [[[-0.5_7_8_8, -2.5_9_9_4, -3.7_0_5_4], [0.0_4_3_8, 4.7_9_9_7, 1.8_7_9_5], [1.5_8_6_2, 6.6_4_0_9, 4.4_6_3_8]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __lowercase , atol=1E-4 ) )
170
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_barthez import BarthezTokenizer else: _lowercase : List[str] =None _lowercase : Union[str, Any] =logging.get_logger(__name__) _lowercase : Optional[int] ={"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"} _lowercase : Dict ={ "vocab_file": { "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model", "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model", "moussaKam/barthez-orangesum-title": ( "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model" ), }, "tokenizer_file": { "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json", "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json", "moussaKam/barthez-orangesum-title": ( "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json" ), }, } _lowercase : str ={ "moussaKam/mbarthez": 1024, "moussaKam/barthez": 1024, "moussaKam/barthez-orangesum-title": 1024, } _lowercase : Dict ="▁" class snake_case__ (A__ ): """simple docstring""" __lowerCAmelCase :Union[str, Any] = VOCAB_FILES_NAMES __lowerCAmelCase :Optional[int] = PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase :int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCAmelCase :Any = ["input_ids", "attention_mask"] __lowerCAmelCase :Any = BarthezTokenizer def __init__( self , __lowercase=None , __lowercase=None , __lowercase="<s>" , __lowercase="</s>" , __lowercase="</s>" , __lowercase="<s>" , __lowercase="<unk>" , __lowercase="<pad>" , __lowercase="<mask>" , **__lowercase , ) -> str: """simple docstring""" a__ : int = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token super().__init__( __lowercase , tokenizer_file=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , cls_token=__lowercase , pad_token=__lowercase , mask_token=__lowercase , **__lowercase , ) a__ : List[str] = vocab_file a__ : List[Any] = False if not self.vocab_file else True def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] a__ : Tuple = [self.cls_token_id] a__ : List[str] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase = None ) -> List[int]: """simple docstring""" a__ : List[Any] = [self.sep_token_id] a__ : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase = None ) -> Tuple[str]: """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(__lowercase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return a__ : Tuple = os.path.join( __lowercase , (filename_prefix + 
"""-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ): copyfile(self.vocab_file , __lowercase ) return (out_vocab_file,)
170
1
"""simple docstring""" import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase_ = logging.get_logger(__name__) def snake_case ( A__ ,A__ ,A__ ,A__ ): UpperCAmelCase_ : Optional[Any] = original_name.split("." )[0] UpperCAmelCase_ : Optional[Any] = key.split("." ) UpperCAmelCase_ : str = int(key_list[key_list.index(A__ ) - 2] ) UpperCAmelCase_ : str = int(key_list[key_list.index(A__ ) - 1] ) UpperCAmelCase_ : List[Any] = orig_block_num - offset UpperCAmelCase_ : List[str] = key.replace(F"""{orig_block_num}.{layer_num}.{original_name}""" ,F"""block.{new_block_num}.{layer_num}.{new_name}""" ) return key def snake_case ( A__ ): UpperCAmelCase_ : List[str] = OrderedDict() UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = 0, 0 for key, value in state_dict.items(): if key.startswith("network" ): UpperCAmelCase_ : Tuple = key.replace("network" ,"poolformer.encoder" ) if "proj" in key: # Works for the first embedding as well as the internal embedding layers if key.endswith("bias" ) and "patch_embed" not in key: patch_emb_offset += 1 UpperCAmelCase_ : List[str] = key[: key.find("proj" )] UpperCAmelCase_ : Optional[Any] = key.replace(A__ ,F"""patch_embeddings.{total_embed_found}.""" ) UpperCAmelCase_ : Union[str, Any] = key.replace("proj" ,"projection" ) if key.endswith("bias" ): total_embed_found += 1 if "patch_embeddings" in key: UpperCAmelCase_ : Dict = "poolformer.encoder." + key if "mlp.fc1" in key: UpperCAmelCase_ : List[Any] = replace_key_with_offset(A__ ,A__ ,"mlp.fc1" ,"output.conv1" ) if "mlp.fc2" in key: UpperCAmelCase_ : List[Any] = replace_key_with_offset(A__ ,A__ ,"mlp.fc2" ,"output.conv2" ) if "norm1" in key: UpperCAmelCase_ : List[Any] = replace_key_with_offset(A__ ,A__ ,"norm1" ,"before_norm" ) if "norm2" in key: UpperCAmelCase_ : str = replace_key_with_offset(A__ ,A__ ,"norm2" ,"after_norm" ) if "layer_scale_1" in key: UpperCAmelCase_ : Optional[Any] = replace_key_with_offset(A__ ,A__ ,"layer_scale_1" ,"layer_scale_1" ) if "layer_scale_2" in key: UpperCAmelCase_ : str = replace_key_with_offset(A__ ,A__ ,"layer_scale_2" ,"layer_scale_2" ) if "head" in key: UpperCAmelCase_ : List[str] = key.replace("head" ,"classifier" ) UpperCAmelCase_ : Tuple = value return new_state_dict def snake_case ( ): UpperCAmelCase_ : List[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase_ : Any = Image.open(requests.get(A__ ,stream=A__ ).raw ) return image @torch.no_grad() def snake_case ( A__ ,A__ ,A__ ): UpperCAmelCase_ : List[Any] = PoolFormerConfig() # set attributes based on model_name UpperCAmelCase_ : Union[str, Any] = "huggingface/label-files" UpperCAmelCase_ : Dict = model_name[-3:] UpperCAmelCase_ : List[Any] = 10_00 UpperCAmelCase_ : List[str] = "imagenet-1k-id2label.json" UpperCAmelCase_ : Union[str, Any] = (1, 10_00) # set config attributes UpperCAmelCase_ : List[str] = json.load(open(hf_hub_download(A__ ,A__ ,repo_type="dataset" ) ,"r" ) ) UpperCAmelCase_ : str = {int(A__ ): v for k, v in idalabel.items()} UpperCAmelCase_ : Dict = idalabel UpperCAmelCase_ : Union[str, Any] = {v: k for k, v in idalabel.items()} if size == "s12": UpperCAmelCase_ : Any = [2, 2, 6, 2] UpperCAmelCase_ : Union[str, Any] = [64, 1_28, 3_20, 5_12] UpperCAmelCase_ : int = 4.0 
UpperCAmelCase_ : Tuple = 0.9 elif size == "s24": UpperCAmelCase_ : List[Any] = [4, 4, 12, 4] UpperCAmelCase_ : Tuple = [64, 1_28, 3_20, 5_12] UpperCAmelCase_ : List[Any] = 4.0 UpperCAmelCase_ : Dict = 0.9 elif size == "s36": UpperCAmelCase_ : Optional[int] = [6, 6, 18, 6] UpperCAmelCase_ : Tuple = [64, 1_28, 3_20, 5_12] UpperCAmelCase_ : List[Any] = 4.0 UpperCAmelCase_ : int = 1e-6 UpperCAmelCase_ : Union[str, Any] = 0.9 elif size == "m36": UpperCAmelCase_ : List[Any] = [6, 6, 18, 6] UpperCAmelCase_ : List[str] = [96, 1_92, 3_84, 7_68] UpperCAmelCase_ : Union[str, Any] = 4.0 UpperCAmelCase_ : List[str] = 1e-6 UpperCAmelCase_ : List[str] = 0.95 elif size == "m48": UpperCAmelCase_ : Tuple = [8, 8, 24, 8] UpperCAmelCase_ : List[Any] = [96, 1_92, 3_84, 7_68] UpperCAmelCase_ : List[Any] = 4.0 UpperCAmelCase_ : str = 1e-6 UpperCAmelCase_ : Union[str, Any] = 0.95 else: raise ValueError(F"""Size {size} not supported""" ) # load image processor UpperCAmelCase_ : Optional[Any] = PoolFormerImageProcessor(crop_pct=A__ ) # Prepare image UpperCAmelCase_ : Union[str, Any] = prepare_img() UpperCAmelCase_ : Any = image_processor(images=A__ ,return_tensors="pt" ).pixel_values logger.info(F"""Converting model {model_name}...""" ) # load original state dict UpperCAmelCase_ : Dict = torch.load(A__ ,map_location=torch.device("cpu" ) ) # rename keys UpperCAmelCase_ : Dict = rename_keys(A__ ) # create HuggingFace model and load state dict UpperCAmelCase_ : List[Any] = PoolFormerForImageClassification(A__ ) model.load_state_dict(A__ ) model.eval() # Define image processor UpperCAmelCase_ : Union[str, Any] = PoolFormerImageProcessor(crop_pct=A__ ) UpperCAmelCase_ : Optional[int] = image_processor(images=prepare_img() ,return_tensors="pt" ).pixel_values # forward pass UpperCAmelCase_ : Any = model(A__ ) UpperCAmelCase_ : int = outputs.logits # define expected logit slices for different models if size == "s12": UpperCAmelCase_ : List[Any] = torch.tensor([-0.3045, -0.6758, -0.4869] ) elif size == "s24": UpperCAmelCase_ : Union[str, Any] = torch.tensor([0.4402, -0.1374, -0.8045] ) elif size == "s36": UpperCAmelCase_ : Union[str, Any] = torch.tensor([-0.6080, -0.5133, -0.5898] ) elif size == "m36": UpperCAmelCase_ : List[Any] = torch.tensor([0.3952, 0.2263, -1.2668] ) elif size == "m48": UpperCAmelCase_ : Union[str, Any] = torch.tensor([0.1167, -0.0656, -0.3423] ) else: raise ValueError(F"""Size {size} not supported""" ) # verify logits assert logits.shape == expected_shape assert torch.allclose(logits[0, :3] ,A__ ,atol=1e-2 ) # finally, save model and image processor logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(A__ ).mkdir(exist_ok=A__ ) model.save_pretrained(A__ ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(A__ ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''poolformer_s12''', type=str, help='''Name of the model you\'d like to convert.''', ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) lowerCamelCase_ = parser.parse_args() convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
253
"""simple docstring""" def snake_case ( A__ ): return [ txt[:a] + txt[a].upper() + txt[a + 1 :] for a in range(len(A__ ) ) if txt[a].isalpha() ] if __name__ == "__main__": __import__('''doctest''').testmod()
253
1
"""simple docstring""" def a_ ( _lowerCAmelCase : str = 1000 ): '''simple docstring''' lowercase__ : Optional[Any] = 2**power lowercase__ : str = str(_SCREAMING_SNAKE_CASE ) lowercase__ : str = list(_SCREAMING_SNAKE_CASE ) lowercase__ : int = 0 for i in list_num: sum_of_num += int(_SCREAMING_SNAKE_CASE ) return sum_of_num if __name__ == "__main__": _UpperCamelCase : Any = int(input("Enter the power of 2: ").strip()) print("2 ^ ", power, " = ", 2**power) _UpperCamelCase : Tuple = solution(power) print("Sum of the digits is: ", result)
77
import os
from distutils.util import strtobool


def get_int_from_env(env_keys, default):
    """Return the first non-negative integer value found under ``env_keys``, else ``default``."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    """Return the environment variable ``key`` parsed as a boolean flag."""
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
296
0
import platform
from argparse import ArgumentParser

import huggingface_hub

from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand


def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
220
'''simple docstring''' import csv from collections import defaultdict from dataclasses import dataclass, field from typing import List, Optional import matplotlib.pyplot as plt import numpy as np from matplotlib.ticker import ScalarFormatter from transformers import HfArgumentParser def UpperCamelCase__ ( lowerCAmelCase=None , lowerCAmelCase=None ): """simple docstring""" return field(default_factory=lambda: default , metadata=lowerCAmelCase ) @dataclass class UpperCAmelCase : _lowercase: str = field( metadata={'''help''': '''The csv file to plot.'''} , ) _lowercase: bool = field( default=snake_case_ , metadata={'''help''': '''Whether to plot along batch size or sequence length. Defaults to sequence length.'''} , ) _lowercase: bool = field( default=snake_case_ , metadata={'''help''': '''Whether the csv file has time results or memory results. Defaults to memory results.'''} , ) _lowercase: bool = field( default=snake_case_ , metadata={'''help''': '''Disable logarithmic scale when plotting'''} , ) _lowercase: bool = field( default=snake_case_ , metadata={ '''help''': '''Whether the csv file has training results or inference results. Defaults to inference results.''' } , ) _lowercase: Optional[str] = field( default=snake_case_ , metadata={'''help''': '''Filename under which the plot will be saved. If unused no plot is saved.'''} , ) _lowercase: Optional[List[str]] = list_field( default=snake_case_ , metadata={'''help''': '''List of model names that are used instead of the ones in the csv file.'''} ) def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" try: int(lowerCAmelCase ) return True except ValueError: return False def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" try: float(lowerCAmelCase ) return True except ValueError: return False class UpperCAmelCase : def __init__( self : List[str] , __snake_case : Union[str, Any] ) -> int: _lowerCAmelCase = args _lowerCAmelCase = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} ) with open(self.args.csv_file , newline="""""" ) as csv_file: _lowerCAmelCase = csv.DictReader(__snake_case ) for row in reader: _lowerCAmelCase = row["""model"""] self.result_dict[model_name]["bsz"].append(int(row["""batch_size"""] ) ) self.result_dict[model_name]["seq_len"].append(int(row["""sequence_length"""] ) ) if can_convert_to_int(row["""result"""] ): # value is not None _lowerCAmelCase = int(row["""result"""] ) elif can_convert_to_float(row["""result"""] ): # value is not None _lowerCAmelCase = float(row["""result"""] ) def lowercase__ ( self : Dict ) -> str: _lowerCAmelCase , _lowerCAmelCase = plt.subplots() _lowerCAmelCase = """Time usage""" if self.args.is_time else """Memory usage""" _lowerCAmelCase = title_str + """ for training""" if self.args.is_train else title_str + """ for inference""" if not self.args.no_log_scale: # set logarithm scales ax.set_xscale("""log""" ) ax.set_yscale("""log""" ) for axis in [ax.xaxis, ax.yaxis]: axis.set_major_formatter(ScalarFormatter() ) for model_name_idx, model_name in enumerate(self.result_dict.keys() ): _lowerCAmelCase = sorted(set(self.result_dict[model_name]["""bsz"""] ) ) _lowerCAmelCase = sorted(set(self.result_dict[model_name]["""seq_len"""] ) ) _lowerCAmelCase = self.result_dict[model_name]["""result"""] ((_lowerCAmelCase) , (_lowerCAmelCase)) = ( (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes) ) _lowerCAmelCase = ( model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx] ) for 
inner_loop_value in inner_loop_array: if self.args.plot_along_batch: _lowerCAmelCase = np.asarray( [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=__snake_case , ) else: _lowerCAmelCase = np.asarray( [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , ) ((_lowerCAmelCase) , (_lowerCAmelCase)) = ( ("""batch_size""", """len""") if self.args.plot_along_batch else ("""in #tokens""", """bsz""") ) _lowerCAmelCase = np.asarray(__snake_case , __snake_case )[: len(__snake_case )] plt.scatter( __snake_case , __snake_case , label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}" ) plt.plot(__snake_case , __snake_case , """--""" ) title_str += f" {label_model_name} vs." _lowerCAmelCase = title_str[:-4] _lowerCAmelCase = """Time in s""" if self.args.is_time else """Memory in MB""" # plot plt.title(__snake_case ) plt.xlabel(__snake_case ) plt.ylabel(__snake_case ) plt.legend() if self.args.figure_png_file is not None: plt.savefig(self.args.figure_png_file ) else: plt.show() def UpperCamelCase__ ( ): """simple docstring""" _lowerCAmelCase = HfArgumentParser(lowerCAmelCase ) _lowerCAmelCase = parser.parse_args_into_dataclasses()[0] _lowerCAmelCase = Plot(args=lowerCAmelCase ) plot.plot() if __name__ == "__main__": main()
220
1
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { 'microsoft/unispeech-sat-base-100h-libri-ft': ( 'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json' ), # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat } class _A ( _lowerCamelCase ): _UpperCamelCase : Optional[int] = '''unispeech-sat''' def __init__( self : Tuple , _A : List[str]=32 , _A : str=768 , _A : Optional[int]=12 , _A : List[Any]=12 , _A : int=3_072 , _A : Dict="gelu" , _A : int=0.1 , _A : Optional[int]=0.1 , _A : str=0.1 , _A : str=0.0 , _A : Tuple=0.0 , _A : Optional[Any]=0.1 , _A : Optional[int]=0.1 , _A : int=0.02 , _A : List[str]=1E-5 , _A : Tuple="group" , _A : Optional[int]="gelu" , _A : List[Any]=(512, 512, 512, 512, 512, 512, 512) , _A : Dict=(5, 2, 2, 2, 2, 2, 2) , _A : Union[str, Any]=(10, 3, 3, 3, 3, 2, 2) , _A : List[str]=False , _A : Optional[int]=128 , _A : int=16 , _A : List[Any]=False , _A : Optional[int]=True , _A : int=0.05 , _A : Any=10 , _A : int=2 , _A : List[str]=0.0 , _A : str=10 , _A : Optional[int]=0 , _A : str=320 , _A : Union[str, Any]=2 , _A : Any=0.1 , _A : Tuple=100 , _A : Any=256 , _A : List[Any]=256 , _A : Dict=0.1 , _A : Any="mean" , _A : List[str]=False , _A : Union[str, Any]=False , _A : Optional[int]=256 , _A : Optional[Any]=(512, 512, 512, 512, 1_500) , _A : List[str]=(5, 3, 3, 1, 1) , _A : Union[str, Any]=(1, 2, 3, 1, 1) , _A : List[str]=512 , _A : str=0 , _A : Optional[int]=1 , _A : Dict=2 , _A : str=504 , **_A : Union[str, Any] , ) -> int: """simple docstring""" super().__init__(**_A , pad_token_id=_A , bos_token_id=_A , eos_token_id=_A ) lowercase : List[Any] = hidden_size lowercase : int = feat_extract_norm lowercase : Union[str, Any] = feat_extract_activation lowercase : List[Any] = list(_A ) lowercase : str = list(_A ) lowercase : Optional[int] = list(_A ) lowercase : Optional[Any] = conv_bias lowercase : str = num_conv_pos_embeddings lowercase : Any = num_conv_pos_embedding_groups lowercase : Optional[Any] = len(self.conv_dim ) lowercase : Optional[int] = num_hidden_layers lowercase : List[Any] = intermediate_size lowercase : Any = hidden_act lowercase : Any = num_attention_heads lowercase : int = hidden_dropout lowercase : Tuple = attention_dropout lowercase : Optional[int] = activation_dropout lowercase : List[str] = feat_proj_dropout lowercase : Tuple = final_dropout lowercase : Union[str, Any] = layerdrop lowercase : int = layer_norm_eps lowercase : List[str] = initializer_range lowercase : List[str] = vocab_size lowercase : int = num_clusters lowercase : int = do_stable_layer_norm lowercase : Tuple = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,""" f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowercase : List[Any] = apply_spec_augment lowercase : Optional[Any] = mask_time_prob lowercase : Any = mask_time_length lowercase : Union[str, Any] = mask_time_min_masks lowercase : Union[str, Any] = mask_feature_prob lowercase : Tuple = mask_feature_length lowercase : Dict = mask_feature_min_masks # parameters for pretraining with codevector quantized representations lowercase : Any = num_codevectors_per_group lowercase : Any = num_codevector_groups lowercase : List[str] = contrastive_logits_temperature lowercase : str = feat_quantizer_dropout lowercase : Optional[int] = num_negatives lowercase : Tuple = codevector_dim lowercase : List[str] = proj_codevector_dim lowercase : Optional[Any] = diversity_loss_weight # ctc loss lowercase : Optional[int] = ctc_loss_reduction lowercase : str = ctc_zero_infinity # SequenceClassification-specific parameter. Feel free to ignore for other classes. lowercase : Any = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. lowercase : List[Any] = list(_A ) lowercase : Dict = list(_A ) lowercase : Optional[Any] = list(_A ) lowercase : str = xvector_output_dim @property def __a ( self : List[str] ) -> List[Any]: """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
308
import argparse
import os
from io import BytesIO
from pathlib import Path

import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm


def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    # grow the query size until enough candidates are returned (or a hard cap is hit)
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))  # check that the payload decodes as an image
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
308
1
def solution(pence: int = 200) -> int:
    """Count the ways to make ``pence`` pence from standard UK coins."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73682
353
import os from argparse import ArgumentParser, Namespace from ..data import SingleSentenceClassificationProcessor as Processor from ..pipelines import TextClassificationPipeline from ..utils import is_tf_available, is_torch_available, logging from . import BaseTransformersCLICommand if not is_tf_available() and not is_torch_available(): raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''') # TF training parameters lowerCAmelCase__ = False lowerCAmelCase__ = False def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" return TrainCommand(lowerCamelCase__ ) class snake_case__(_UpperCamelCase ): """simple docstring""" @staticmethod def snake_case ( SCREAMING_SNAKE_CASE : ArgumentParser ): lowercase__ : Optional[int] = parser.add_parser("train" , help="CLI tool to train a model on a task." ) train_parser.add_argument( "--train_data" , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences." , ) train_parser.add_argument( "--column_label" , type=SCREAMING_SNAKE_CASE , default=0 , help="Column of the dataset csv file with example labels." ) train_parser.add_argument( "--column_text" , type=SCREAMING_SNAKE_CASE , default=1 , help="Column of the dataset csv file with example texts." ) train_parser.add_argument( "--column_id" , type=SCREAMING_SNAKE_CASE , default=2 , help="Column of the dataset csv file with example ids." ) train_parser.add_argument( "--skip_first_row" , action="store_true" , help="Skip the first row of the csv file (headers)." ) train_parser.add_argument("--validation_data" , type=SCREAMING_SNAKE_CASE , default="" , help="path to validation dataset." ) train_parser.add_argument( "--validation_split" , type=SCREAMING_SNAKE_CASE , default=0.1 , help="if validation dataset is not provided, fraction of train dataset to use as validation dataset." , ) train_parser.add_argument("--output" , type=SCREAMING_SNAKE_CASE , default="./" , help="path to saved the trained model." ) train_parser.add_argument( "--task" , type=SCREAMING_SNAKE_CASE , default="text_classification" , help="Task to train the model on." ) train_parser.add_argument( "--model" , type=SCREAMING_SNAKE_CASE , default="bert-base-uncased" , help="Model's name or path to stored model." ) train_parser.add_argument("--train_batch_size" , type=SCREAMING_SNAKE_CASE , default=32 , help="Batch size for training." ) train_parser.add_argument("--valid_batch_size" , type=SCREAMING_SNAKE_CASE , default=64 , help="Batch size for validation." ) train_parser.add_argument("--learning_rate" , type=SCREAMING_SNAKE_CASE , default=3E-5 , help="Learning rate." ) train_parser.add_argument("--adam_epsilon" , type=SCREAMING_SNAKE_CASE , default=1E-0_8 , help="Epsilon for Adam optimizer." 
) train_parser.set_defaults(func=SCREAMING_SNAKE_CASE ) def __init__( self : int , SCREAMING_SNAKE_CASE : Namespace ): lowercase__ : int = logging.get_logger("transformers-cli/training" ) lowercase__ : List[Any] = "tf" if is_tf_available() else "torch" os.makedirs(args.output , exist_ok=SCREAMING_SNAKE_CASE ) lowercase__ : Optional[Any] = args.output lowercase__ : Union[str, Any] = args.column_label lowercase__ : Optional[int] = args.column_text lowercase__ : Optional[int] = args.column_id self.logger.info(f"""Loading {args.task} pipeline for {args.model}""" ) if args.task == "text_classification": lowercase__ : int = TextClassificationPipeline.from_pretrained(args.model ) elif args.task == "token_classification": raise NotImplementedError elif args.task == "question_answering": raise NotImplementedError self.logger.info(f"""Loading dataset from {args.train_data}""" ) lowercase__ : List[str] = Processor.create_from_csv( args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , ) lowercase__ : Union[str, Any] = None if args.validation_data: self.logger.info(f"""Loading validation dataset from {args.validation_data}""" ) lowercase__ : Optional[int] = Processor.create_from_csv( args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , ) lowercase__ : Dict = args.validation_split lowercase__ : List[str] = args.train_batch_size lowercase__ : Any = args.valid_batch_size lowercase__ : Optional[int] = args.learning_rate lowercase__ : int = args.adam_epsilon def snake_case ( self : Dict ): if self.framework == "tf": return self.run_tf() return self.run_torch() def snake_case ( self : Union[str, Any] ): raise NotImplementedError def snake_case ( self : Union[str, Any] ): self.pipeline.fit( self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , ) # Save trained pipeline self.pipeline.save_pretrained(self.output )
121
0
import logging
import os
from typing import List, Tuple

import numpy as np
import psutil
import torch
import torch.distributed as dist

from transformers import RagRetriever


logger = logging.getLogger(__name__)


class RagPyTorchDistributedRetriever(RagRetriever):
    """
    A distributed retriever built on top of ``torch.distributed``. Only the main worker loads the
    index into (CPU) memory; the other workers gather their queries to it and receive results back.
    """

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait until the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_as_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_as_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
62
from __future__ import annotations import math import random from collections.abc import Collection from typing import overload class UpperCAmelCase__ : """simple docstring""" def __init__( self , A_ = None ) -> None: if components is None: __UpperCamelCase =[] __UpperCamelCase =list(A_ ) def __len__( self ) -> int: return len(self.__components ) def __str__( self ) -> str: return "(" + ",".join(map(A_ , self.__components ) ) + ")" def __add__( self , A_ ) -> Vector: __UpperCamelCase =len(self ) if size == len(A_ ): __UpperCamelCase =[self.__components[i] + other.component(A_ ) for i in range(A_ )] return Vector(A_ ) else: raise Exception('must have the same size' ) def __sub__( self , A_ ) -> Vector: __UpperCamelCase =len(self ) if size == len(A_ ): __UpperCamelCase =[self.__components[i] - other.component(A_ ) for i in range(A_ )] return Vector(A_ ) else: # error case raise Exception('must have the same size' ) @overload def __mul__( self , A_ ) -> Vector: ... @overload def __mul__( self , A_ ) -> float: ... def __mul__( self , A_ ) -> float | Vector: if isinstance(A_ , (float, int) ): __UpperCamelCase =[c * other for c in self.__components] return Vector(A_ ) elif isinstance(A_ , A_ ) and len(self ) == len(A_ ): __UpperCamelCase =len(self ) __UpperCamelCase =[self.__components[i] * other.component(A_ ) for i in range(A_ )] return sum(A_ ) else: # error case raise Exception('invalid operand!' ) def _a ( self ) -> Vector: return Vector(self.__components ) def _a ( self , A_ ) -> float: if isinstance(A_ , A_ ) and -len(self.__components ) <= i < len(self.__components ): return self.__components[i] else: raise Exception('index out of range' ) def _a ( self , A_ , A_ ) -> None: assert -len(self.__components ) <= pos < len(self.__components ) __UpperCamelCase =value def _a ( self ) -> float: if len(self.__components ) == 0: raise Exception('Vector is empty' ) __UpperCamelCase =[c**2 for c in self.__components] return math.sqrt(sum(A_ ) ) def _a ( self , A_ , A_ = False ) -> float: __UpperCamelCase =self * other __UpperCamelCase =self.euclidean_length() * other.euclidean_length() if deg: return math.degrees(math.acos(num / den ) ) else: return math.acos(num / den ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ): assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return Vector([0] * dimension ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ): assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )) __UpperCamelCase =[0] * dimension __UpperCamelCase =1 return Vector(SCREAMING_SNAKE_CASE__ ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : Vector , SCREAMING_SNAKE_CASE__ : Vector ): assert ( isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (isinstance(SCREAMING_SNAKE_CASE__ , (int, float) )) ) return x * scalar + y def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ): random.seed(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =[random.randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for _ in range(SCREAMING_SNAKE_CASE__ )] return Vector(SCREAMING_SNAKE_CASE__ ) class UpperCAmelCase__ : """simple docstring""" def __init__( self , A_ , A_ , A_ ) -> None: __UpperCamelCase =matrix __UpperCamelCase =w __UpperCamelCase =h def __str__( self ) -> str: __UpperCamelCase ='' for i in 
range(self.__height ): ans += "|" for j in range(self.__width ): if j < self.__width - 1: ans += str(self.__matrix[i][j] ) + "," else: ans += str(self.__matrix[i][j] ) + "|\n" return ans def __add__( self , A_ ) -> Matrix: if self.__width == other.width() and self.__height == other.height(): __UpperCamelCase =[] for i in range(self.__height ): __UpperCamelCase =[ self.__matrix[i][j] + other.component(A_ , A_ ) for j in range(self.__width ) ] matrix.append(A_ ) return Matrix(A_ , self.__width , self.__height ) else: raise Exception('matrix must have the same dimension!' ) def __sub__( self , A_ ) -> Matrix: if self.__width == other.width() and self.__height == other.height(): __UpperCamelCase =[] for i in range(self.__height ): __UpperCamelCase =[ self.__matrix[i][j] - other.component(A_ , A_ ) for j in range(self.__width ) ] matrix.append(A_ ) return Matrix(A_ , self.__width , self.__height ) else: raise Exception('matrices must have the same dimension!' ) @overload def __mul__( self , A_ ) -> Matrix: ... @overload def __mul__( self , A_ ) -> Vector: ... def __mul__( self , A_ ) -> Vector | Matrix: if isinstance(A_ , A_ ): # matrix-vector if len(A_ ) == self.__width: __UpperCamelCase =zero_vector(self.__height ) for i in range(self.__height ): __UpperCamelCase =[ self.__matrix[i][j] * other.component(A_ ) for j in range(self.__width ) ] ans.change_component(A_ , sum(A_ ) ) return ans else: raise Exception( 'vector must have the same size as the ' 'number of columns of the matrix!' ) elif isinstance(A_ , (int, float) ): # matrix-scalar __UpperCamelCase =[ [self.__matrix[i][j] * other for j in range(self.__width )] for i in range(self.__height ) ] return Matrix(A_ , self.__width , self.__height ) return None def _a ( self ) -> int: return self.__height def _a ( self ) -> int: return self.__width def _a ( self , A_ , A_ ) -> float: if 0 <= x < self.__height and 0 <= y < self.__width: return self.__matrix[x][y] else: raise Exception('change_component: indices out of bounds' ) def _a ( self , A_ , A_ , A_ ) -> None: if 0 <= x < self.__height and 0 <= y < self.__width: __UpperCamelCase =value else: raise Exception('change_component: indices out of bounds' ) def _a ( self , A_ , A_ ) -> float: if self.__height != self.__width: raise Exception('Matrix is not square' ) __UpperCamelCase =self.__matrix[:x] + self.__matrix[x + 1 :] for i in range(len(A_ ) ): __UpperCamelCase =minor[i][:y] + minor[i][y + 1 :] return Matrix(A_ , self.__width - 1 , self.__height - 1 ).determinant() def _a ( self , A_ , A_ ) -> float: if self.__height != self.__width: raise Exception('Matrix is not square' ) if 0 <= x < self.__height and 0 <= y < self.__width: return (-1) ** (x + y) * self.minor(A_ , A_ ) else: raise Exception('Indices out of bounds' ) def _a ( self ) -> float: if self.__height != self.__width: raise Exception('Matrix is not square' ) if self.__height < 1: raise Exception('Matrix has no element' ) elif self.__height == 1: return self.__matrix[0][0] elif self.__height == 2: return ( self.__matrix[0][0] * self.__matrix[1][1] - self.__matrix[0][1] * self.__matrix[1][0] ) else: __UpperCamelCase =[ self.__matrix[0][y] * self.cofactor(0 , A_ ) for y in range(self.__width ) ] return sum(A_ ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ): __UpperCamelCase =[[0] * n for _ in range(SCREAMING_SNAKE_CASE__ )] return Matrix(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , 
SCREAMING_SNAKE_CASE__ : int ): random.seed(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =[ [random.randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for _ in range(SCREAMING_SNAKE_CASE__ )] for _ in range(SCREAMING_SNAKE_CASE__ ) ] return Matrix(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
62
1
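# --- illustrative aside (not a dataset row) ----------------------------------
# A minimal, self-contained sketch of the Laplace (cofactor) expansion that the
# determinant/minor/cofactor methods in the matrix sample above implement.
# All names here are illustrative; they do not refer to the mangled
# identifiers in the sample.
def laplace_determinant(m: list[list[float]]) -> float:
    if len(m) == 1:
        return m[0][0]
    total = 0.0
    for col, value in enumerate(m[0]):
        # minor: drop row 0 and column `col`
        minor = [row[:col] + row[col + 1 :] for row in m[1:]]
        # the cofactor sign alternates with the column index
        total += (-1) ** col * value * laplace_determinant(minor)
    return total


assert laplace_determinant([[1, 2], [3, 4]]) == -2  # 1*4 - 2*3
# ------------------------------------------------------------------------------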
"""simple docstring""" import numpy as np import torch from torch.utils.data import Dataset from utils import logger class _UpperCAmelCase ( _lowerCAmelCase ): def __init__( self : List[str] , _lowercase : Tuple , _lowercase : Any ): __UpperCAmelCase = params __UpperCAmelCase = np.array(_A ) __UpperCAmelCase = np.array([len(_A ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self : Optional[Any] , _lowercase : Union[str, Any] ): return (self.token_ids[index], self.lengths[index]) def __len__( self : List[str] ): return len(self.lengths ) def a ( self : Optional[Any] ): assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def a ( self : Dict ): __UpperCAmelCase = self.params.max_model_input_size __UpperCAmelCase = self.lengths > max_len logger.info(F'''Splitting {sum(_A )} too long sequences.''' ) def divide_chunks(_lowercase : str , _lowercase : List[Any] ): return [l[i : i + n] for i in range(0 , len(_A ) , _A )] __UpperCAmelCase = [] __UpperCAmelCase = [] if self.params.mlm: __UpperCAmelCase = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token'] else: __UpperCAmelCase = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token'] for seq_, len_ in zip(self.token_ids , self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: __UpperCAmelCase = [] for sub_s in divide_chunks(seq_ , max_len - 2 ): if sub_s[0] != cls_id: __UpperCAmelCase = np.insert(_A , 0 , _A ) if sub_s[-1] != sep_id: __UpperCAmelCase = np.insert(_A , len(_A ) , _A ) assert len(_A ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(_A ) new_tok_ids.extend(_A ) new_lengths.extend([len(_A ) for l in sub_seqs] ) __UpperCAmelCase = np.array(_A ) __UpperCAmelCase = np.array(_A ) def a ( self : Union[str, Any] ): __UpperCAmelCase = len(self ) __UpperCAmelCase = self.lengths > 11 __UpperCAmelCase = self.token_ids[indices] __UpperCAmelCase = self.lengths[indices] __UpperCAmelCase = len(self ) logger.info(F'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' ) def a ( self : int ): if "unk_token" not in self.params.special_tok_ids: return else: __UpperCAmelCase = self.params.special_tok_ids['unk_token'] __UpperCAmelCase = len(self ) __UpperCAmelCase = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) __UpperCAmelCase = (unk_occs / self.lengths) < 0.5 __UpperCAmelCase = self.token_ids[indices] __UpperCAmelCase = self.lengths[indices] __UpperCAmelCase = len(self ) logger.info(F'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' ) def a ( self : Any ): if not self.params.is_master: return logger.info(F'''{len(self )} sequences''' ) # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def a ( self : Dict , _lowercase : Optional[Any] ): __UpperCAmelCase = [t[0] for t in batch] __UpperCAmelCase = [t[1] for t in batch] assert len(_A ) == len(_A ) # 
Max for paddings __UpperCAmelCase = max(_A ) # Pad token ids if self.params.mlm: __UpperCAmelCase = self.params.special_tok_ids['pad_token'] else: __UpperCAmelCase = self.params.special_tok_ids['unk_token'] __UpperCAmelCase = [list(t.astype(_A ) ) + [pad_idx] * (max_seq_len_ - len(_A )) for t in token_ids] assert len(tk_ ) == len(_A ) assert all(len(_A ) == max_seq_len_ for t in tk_ ) __UpperCAmelCase = torch.tensor(tk_ ) # (bs, max_seq_len_) __UpperCAmelCase = torch.tensor(_A ) # (bs) return tk_t, lg_t
356
"""simple docstring""" import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class _UpperCAmelCase : def __init__( self : Dict , _lowercase : int , _lowercase : List[str]=13 , _lowercase : Dict=32 , _lowercase : Any=2 , _lowercase : Optional[int]=3 , _lowercase : Optional[Any]=16 , _lowercase : Optional[int]=[1, 2, 1] , _lowercase : int=[2, 2, 4] , _lowercase : Optional[Any]=2 , _lowercase : Union[str, Any]=2.0 , _lowercase : Any=True , _lowercase : Optional[Any]=0.0 , _lowercase : Dict=0.0 , _lowercase : Dict=0.1 , _lowercase : str="gelu" , _lowercase : List[Any]=False , _lowercase : List[Any]=True , _lowercase : Optional[Any]=0.02 , _lowercase : str=1E-5 , _lowercase : str=True , _lowercase : Any=None , _lowercase : Tuple=True , _lowercase : Any=10 , _lowercase : int=8 , _lowercase : Optional[Any]=["stage1", "stage2", "stage3"] , _lowercase : Optional[Any]=[1, 2, 3] , ): __UpperCAmelCase = parent __UpperCAmelCase = batch_size __UpperCAmelCase = image_size __UpperCAmelCase = patch_size __UpperCAmelCase = num_channels __UpperCAmelCase = embed_dim __UpperCAmelCase = depths __UpperCAmelCase = num_heads __UpperCAmelCase = window_size __UpperCAmelCase = mlp_ratio __UpperCAmelCase = qkv_bias __UpperCAmelCase = hidden_dropout_prob __UpperCAmelCase = attention_probs_dropout_prob __UpperCAmelCase = drop_path_rate __UpperCAmelCase = hidden_act __UpperCAmelCase = use_absolute_embeddings __UpperCAmelCase = patch_norm __UpperCAmelCase = layer_norm_eps __UpperCAmelCase = initializer_range __UpperCAmelCase = is_training __UpperCAmelCase = scope __UpperCAmelCase = use_labels __UpperCAmelCase = type_sequence_label_size __UpperCAmelCase = encoder_stride __UpperCAmelCase = out_features __UpperCAmelCase = out_indices def a ( self : int ): __UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __UpperCAmelCase = None if self.use_labels: __UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase = self.get_config() return config, pixel_values, labels def a ( self : Dict ): return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def a ( self : List[Any] , _lowercase : Union[str, Any] , _lowercase : str , _lowercase : int ): __UpperCAmelCase = 
MaskFormerSwinModel(config=_lowercase ) model.to(_lowercase ) model.eval() __UpperCAmelCase = model(_lowercase ) __UpperCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) __UpperCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def a ( self : int , _lowercase : Optional[Any] , _lowercase : Any , _lowercase : Dict ): __UpperCAmelCase = MaskFormerSwinBackbone(config=_lowercase ) model.to(_lowercase ) model.eval() __UpperCAmelCase = model(_lowercase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [16, 32, 64] ) # verify ValueError with self.parent.assertRaises(_lowercase ): __UpperCAmelCase = ['''stem'''] __UpperCAmelCase = MaskFormerSwinBackbone(config=_lowercase ) def a ( self : Optional[int] ): __UpperCAmelCase = self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = config_and_inputs __UpperCAmelCase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): a__ : List[Any] = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) a__ : Optional[int] = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {} a__ : List[str] = False a__ : int = False a__ : str = False a__ : str = False a__ : Any = False def a ( self : Optional[Any] ): __UpperCAmelCase = MaskFormerSwinModelTester(self ) __UpperCAmelCase = ConfigTester(self , config_class=_lowercase , embed_dim=37 ) @require_torch_multi_gpu @unittest.skip( reason=( '''`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with''' ''' `nn.DataParallel`''' ) ) def a ( self : int ): pass def a ( self : Dict ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def a ( self : str ): return def a ( self : Optional[Any] ): __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowercase ) def a ( self : Optional[int] ): __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_lowercase ) @unittest.skip('''Swin does not use inputs_embeds''' ) def a ( self : List[Any] ): pass @unittest.skip('''Swin does not support feedforward chunking''' ) def a ( self : str ): pass def a ( self : Union[str, Any] ): __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase = model_class(_lowercase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __UpperCAmelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_lowercase , nn.Linear ) ) def a ( self : Union[str, Any] ): __UpperCAmelCase , __UpperCAmelCase = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase = model_class(_lowercase ) __UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCAmelCase = [*signature.parameters.keys()] __UpperCAmelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _lowercase ) @unittest.skip(reason='''MaskFormerSwin is only used as backbone and doesn\'t support output_attentions''' ) def a ( self : Optional[Any] ): pass @unittest.skip(reason='''MaskFormerSwin is only used as an internal backbone''' ) def a ( self : Optional[Any] ): pass def a ( self : List[Any] , _lowercase : Union[str, Any] , _lowercase : List[str] , _lowercase : Dict , _lowercase : Tuple ): __UpperCAmelCase = model_class(_lowercase ) model.to(_lowercase ) model.eval() with torch.no_grad(): __UpperCAmelCase = model(**self._prepare_for_class(_lowercase , _lowercase ) ) __UpperCAmelCase = outputs.hidden_states __UpperCAmelCase = getattr( self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(_lowercase ) , _lowercase ) # Swin has a different seq_length __UpperCAmelCase = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def a ( self : str ): __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: __UpperCAmelCase = True self.check_hidden_states_output(_lowercase , _lowercase , _lowercase , _lowercase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase = True self.check_hidden_states_output(_lowercase , _lowercase , _lowercase , _lowercase ) def a ( self : Optional[Any] ): __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase = 3 __UpperCAmelCase = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) __UpperCAmelCase = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __UpperCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) __UpperCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: __UpperCAmelCase = True self.check_hidden_states_output(_lowercase , _lowercase , _lowercase , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase = True self.check_hidden_states_output(_lowercase , _lowercase , _lowercase , (padded_height, padded_width) ) @unittest.skip(reason='''MaskFormerSwin doesn\'t have pretrained checkpoints''' ) def a ( self : Any ): pass @unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' ) def a ( self : str ): pass @unittest.skip(reason='''This will be fixed 
once MaskFormerSwin is replaced by native Swin''' ) def a ( self : Tuple ): pass def a ( self : Tuple ): __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(_lowercase : List[str] ): __UpperCAmelCase = 0 return t def check_equivalence(_lowercase : List[Any] , _lowercase : Any , _lowercase : str , _lowercase : List[str]={} ): with torch.no_grad(): __UpperCAmelCase = model(**_lowercase , return_dict=_lowercase , **_lowercase ) __UpperCAmelCase = model(**_lowercase , return_dict=_lowercase , **_lowercase ).to_tuple() def recursive_check(_lowercase : Dict , _lowercase : Optional[Any] ): if isinstance(_lowercase , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(_lowercase , _lowercase ): recursive_check(_lowercase , _lowercase ) elif isinstance(_lowercase , _lowercase ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(_lowercase , _lowercase ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(_lowercase ) , set_nan_tensor_to_zero(_lowercase ) , atol=1E-5 ) , msg=( '''Tuple and dict output are not equal. Difference:''' F''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:''' F''' {torch.isnan(_lowercase ).any()} and `inf`: {torch.isinf(_lowercase )}. Dict has''' F''' `nan`: {torch.isnan(_lowercase ).any()} and `inf`: {torch.isinf(_lowercase )}.''' ) , ) recursive_check(_lowercase , _lowercase ) for model_class in self.all_model_classes: __UpperCAmelCase = model_class(_lowercase ) model.to(_lowercase ) model.eval() __UpperCAmelCase = self._prepare_for_class(_lowercase , _lowercase ) __UpperCAmelCase = self._prepare_for_class(_lowercase , _lowercase ) check_equivalence(_lowercase , _lowercase , _lowercase ) __UpperCAmelCase = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase ) __UpperCAmelCase = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase ) check_equivalence(_lowercase , _lowercase , _lowercase ) __UpperCAmelCase = self._prepare_for_class(_lowercase , _lowercase ) __UpperCAmelCase = self._prepare_for_class(_lowercase , _lowercase ) check_equivalence(_lowercase , _lowercase , _lowercase , {'''output_hidden_states''': True} ) __UpperCAmelCase = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase ) __UpperCAmelCase = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase ) check_equivalence(_lowercase , _lowercase , _lowercase , {'''output_hidden_states''': True} ) @require_torch class _UpperCAmelCase ( unittest.TestCase , _lowerCAmelCase ): a__ : Optional[Any] = (MaskFormerSwinBackbone,) if is_torch_available() else () a__ : List[str] = MaskFormerSwinConfig def a ( self : List[str] ): __UpperCAmelCase = MaskFormerSwinModelTester(self ) def a ( self : List[Any] ): __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase = inputs_dict['''pixel_values'''].shape[0] for backbone_class in self.all_model_classes: __UpperCAmelCase = backbone_class(_lowercase ) backbone.to(_lowercase ) backbone.eval() __UpperCAmelCase = backbone(**_lowercase ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , _lowercase ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] 
, (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True __UpperCAmelCase = backbone(**_lowercase , output_hidden_states=_lowercase ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: __UpperCAmelCase = backbone(**_lowercase , output_attentions=_lowercase ) self.assertIsNotNone(outputs.attentions )
86
0
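# --- illustrative aside (not a dataset row) ----------------------------------
# Simplified sketch of the chunking rule used by the sequence-dataset sample
# above: sequences longer than the model input size are split into windows of
# (max_len - 2) tokens, and each window is re-wrapped with the cls/sep (or
# bos/eos) special tokens. This is a re-statement of the visible logic, not
# the class's exact code; the token ids below are placeholders.
import numpy as np


def split_long_sequence(seq, max_len, cls_id, sep_id):
    if len(seq) <= max_len:
        return [np.asarray(seq)]
    body = seq[1:-1]  # strip the existing leading/trailing special tokens
    chunks = []
    for i in range(0, len(body), max_len - 2):
        sub = np.asarray([cls_id, *body[i : i + max_len - 2], sep_id])
        assert len(sub) <= max_len
        chunks.append(sub)
    return chunks


# [0, 5, 6, 7, 8, 9, 1] with max_len=5 -> [[0, 5, 6, 7, 1], [0, 8, 9, 1]]
print([c.tolist() for c in split_long_sequence([0, 5, 6, 7, 8, 9, 1], 5, 0, 1)])
# ------------------------------------------------------------------------------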
import math


def insertion_sort(array, start=0, end=0):
    """Sort array[start:end] in place with insertion sort and return it."""
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array, index, heap_size):  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array):
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        heapify(array, 0, i)
    return array


def median_of_a(array, first_index, middle_index, last_index):
    """Median-of-three pivot selection."""
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array, low, high, pivot):
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array):
    """Introsort entry point: quicksort with heap-sort and insertion-sort fallbacks."""
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array, start, end, size_threshold, max_depth):
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_a(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
253
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """Deduplicate and sort the entries of the model-doc table of content."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                modality_doc["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
253
1
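# --- illustrative aside (not a dataset row) ----------------------------------
# Quick sanity check for the introsort sample above (requires its definitions
# in scope): the hybrid sort should agree with Python's built-in sorted().
import random

data = [random.randint(-100, 100) for _ in range(50)]
assert sort(list(data)) == sorted(data)
# ------------------------------------------------------------------------------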
"""simple docstring""" import json import os import tempfile from unittest.mock import patch import torch from torch.utils.data import DataLoader, TensorDataset from accelerate import DistributedType, infer_auto_device_map, init_empty_weights from accelerate.accelerator import Accelerator from accelerate.state import GradientState, PartialState from accelerate.test_utils import require_bnb, require_multi_gpu, slow from accelerate.test_utils.testing import AccelerateTestCase, require_cuda from accelerate.utils import patch_environment def snake_case ( ): UpperCAmelCase_ : Any = torch.nn.Linear(2 ,4 ) UpperCAmelCase_ : Optional[int] = torch.optim.AdamW(model.parameters() ,lr=1.0 ) UpperCAmelCase_ : Dict = torch.optim.lr_scheduler.OneCycleLR(A__ ,max_lr=0.01 ,steps_per_epoch=2 ,epochs=1 ) UpperCAmelCase_ : Optional[Any] = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) ) UpperCAmelCase_ : Union[str, Any] = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) ) return model, optimizer, scheduler, train_dl, valid_dl def snake_case ( A__ ): return (model.weight.abs().sum() + model.bias.abs().sum()).item() def snake_case ( A__ ): UpperCAmelCase_ : List[Any] = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict() model.load_state_dict(A__ ) class UpperCamelCase_ (__lowerCamelCase ): @require_cuda def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]: UpperCAmelCase_ : Dict = Accelerator() assert PartialState._shared_state["_cpu"] is False assert PartialState._shared_state["device"].type == "cuda" with self.assertRaises(UpperCamelCase_ ): UpperCAmelCase_ : str = Accelerator(cpu=UpperCamelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: UpperCAmelCase_ : Union[str, Any] = Accelerator() UpperCAmelCase_ : List[str] = GradientState() assert state.num_steps == 1 UpperCAmelCase_ : List[Any] = 4 assert state.num_steps == 4 assert state.sync_gradients is True UpperCAmelCase_ : Union[str, Any] = False assert state.sync_gradients is False GradientState._reset_state() def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]: UpperCAmelCase_ : Tuple = Accelerator() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = create_components() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : int = accelerator.prepare(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) self.assertTrue(prepared_model in accelerator._models ) self.assertTrue(prepared_optimizer in accelerator._optimizers ) self.assertTrue(prepared_scheduler in accelerator._schedulers ) self.assertTrue(prepared_train_dl in accelerator._dataloaders ) self.assertTrue(prepared_valid_dl in accelerator._dataloaders ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int: UpperCAmelCase_ : Union[str, Any] = Accelerator() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = create_components() accelerator.prepare(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) accelerator.free_memory() self.assertTrue(len(accelerator._models ) == 0 ) self.assertTrue(len(accelerator._optimizers ) == 0 ) self.assertTrue(len(accelerator._schedulers ) == 0 ) self.assertTrue(len(accelerator._dataloaders ) == 0 ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict: PartialState._reset_state() # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist def 
noop(*lowerCAmelCase_ : List[Any] , **lowerCAmelCase_ : int ): pass with patch("torch.cuda.set_device" , UpperCamelCase_ ), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64" ): UpperCAmelCase_ : List[Any] = Accelerator() self.assertEqual(str(accelerator.state.device ) , "cuda:64" ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: UpperCAmelCase_ : int = Accelerator() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = create_components() accelerator.prepare(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) UpperCAmelCase_ : List[Any] = get_signature(UpperCamelCase_ ) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(UpperCamelCase_ ) # make sure random weights don't match load_random_weights(UpperCamelCase_ ) self.assertTrue(abs(model_signature - get_signature(UpperCamelCase_ ) ) > 1e-3 ) # make sure loaded weights match accelerator.load_state(UpperCamelCase_ ) self.assertTrue(abs(model_signature - get_signature(UpperCamelCase_ ) ) < 1e-3 ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict: UpperCAmelCase_ : Union[str, Any] = Accelerator() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = create_components() accelerator.prepare(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) UpperCAmelCase_ : Optional[Any] = get_signature(UpperCamelCase_ ) # saving hook def save_config(lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[str] ): UpperCAmelCase_ : Any = {"class_name": models[0].__class__.__name__} with open(os.path.join(UpperCamelCase_ , "data.json" ) , "w" ) as f: json.dump(UpperCamelCase_ , UpperCamelCase_ ) # loading hook def load_config(lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] ): with open(os.path.join(UpperCamelCase_ , "data.json" ) , "r" ) as f: UpperCAmelCase_ : Dict = json.load(UpperCamelCase_ ) UpperCAmelCase_ : Dict = config["class_name"] UpperCAmelCase_ : int = accelerator.register_save_state_pre_hook(UpperCamelCase_ ) UpperCAmelCase_ : Tuple = accelerator.register_load_state_pre_hook(UpperCamelCase_ ) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(UpperCamelCase_ ) # make sure random weights don't match with hooks load_random_weights(UpperCamelCase_ ) self.assertTrue(abs(model_signature - get_signature(UpperCamelCase_ ) ) > 1e-3 ) # random class name to verify correct one is loaded UpperCAmelCase_ : Union[str, Any] = "random" # make sure loaded weights match with hooks accelerator.load_state(UpperCamelCase_ ) self.assertTrue(abs(model_signature - get_signature(UpperCamelCase_ ) ) < 1e-3 ) # mode.class_name is loaded from config self.assertTrue(model.class_name == model.__class__.__name__ ) # remove hooks save_hook.remove() load_hook.remove() with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(UpperCamelCase_ ) # make sure random weights don't match with hooks removed load_random_weights(UpperCamelCase_ ) self.assertTrue(abs(model_signature - get_signature(UpperCamelCase_ ) ) > 1e-3 ) # random class name to verify correct one is loaded UpperCAmelCase_ : int = "random" # make sure loaded weights match with hooks removed accelerator.load_state(UpperCamelCase_ ) self.assertTrue(abs(model_signature - get_signature(UpperCamelCase_ ) ) < 1e-3 ) # mode.class_name is NOT loaded from config self.assertTrue(model.class_name != model.__class__.__name__ ) def _SCREAMING_SNAKE_CASE ( self : 
List[Any] ) -> List[str]: UpperCAmelCase_ : Any = Accelerator() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = create_components() UpperCAmelCase_ : List[Any] = None # This should work UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = accelerator.prepare( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) self.assertTrue(dummy_obj is None ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]: UpperCAmelCase_ : Optional[int] = Accelerator() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = create_components() UpperCAmelCase_ : Optional[Any] = [1, 2, 3] # This should work UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = accelerator.prepare( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual( getattr(UpperCamelCase_ , "_is_accelerate_prepared" , UpperCamelCase_ ) , UpperCamelCase_ , "Dummy object should have `_is_accelerate_prepared` set to `True`" , ) self.assertEqual( getattr(UpperCamelCase_ , "_is_accelerate_prepared" , UpperCamelCase_ ) , UpperCamelCase_ , "Model is missing `_is_accelerator_prepared` or is set to `False`" , ) self.assertEqual( getattr(UpperCamelCase_ , "_is_accelerate_prepared" , UpperCamelCase_ ) , UpperCamelCase_ , "Optimizer is missing `_is_accelerator_prepared` or is set to `False`" , ) self.assertEqual( getattr(UpperCamelCase_ , "_is_accelerate_prepared" , UpperCamelCase_ ) , UpperCamelCase_ , "Scheduler is missing `_is_accelerator_prepared` or is set to `False`" , ) self.assertEqual( getattr(UpperCamelCase_ , "_is_accelerate_prepared" , UpperCamelCase_ ) , UpperCamelCase_ , "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , ) self.assertEqual( getattr(UpperCamelCase_ , "_is_accelerate_prepared" , UpperCamelCase_ ) , UpperCamelCase_ , "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , ) @slow @require_bnb def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]: from transformers import AutoModelForCausalLM UpperCAmelCase_ : Union[str, Any] = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , load_in_abit=UpperCamelCase_ , device_map={"": 0} , ) UpperCAmelCase_ : Tuple = Accelerator() # This should work UpperCAmelCase_ : List[Any] = accelerator.prepare(UpperCamelCase_ ) @slow @require_bnb def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]: from transformers import AutoModelForCausalLM UpperCAmelCase_ : Dict = Accelerator() with init_empty_weights(): UpperCAmelCase_ : Any = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , ) model.tie_weights() UpperCAmelCase_ : Dict = infer_auto_device_map(UpperCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = "cpu" UpperCAmelCase_ : Tuple = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , device_map=UpperCamelCase_ , load_in_abit=UpperCamelCase_ , llm_inta_enable_fpaa_cpu_offload=UpperCamelCase_ ) # This should not work and get value error with self.assertRaises(UpperCamelCase_ ): UpperCAmelCase_ : Any = accelerator.prepare(UpperCamelCase_ ) @slow @require_bnb @require_multi_gpu def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]: from transformers import AutoModelForCausalLM UpperCAmelCase_ : Union[str, Any] = 
{"distributed_type": DistributedType.MULTI_GPU} with init_empty_weights(): UpperCAmelCase_ : Tuple = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , ) model.tie_weights() UpperCAmelCase_ : Dict = infer_auto_device_map(UpperCamelCase_ ) UpperCAmelCase_ : str = 1 UpperCAmelCase_ : Union[str, Any] = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , load_in_abit=UpperCamelCase_ , device_map=UpperCamelCase_ , ) UpperCAmelCase_ : int = Accelerator() # This should not work and get value error with self.assertRaises(UpperCamelCase_ ): UpperCAmelCase_ : int = accelerator.prepare(UpperCamelCase_ ) PartialState._reset_state() @slow @require_bnb @require_multi_gpu def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int: from transformers import AutoModelForCausalLM with init_empty_weights(): UpperCAmelCase_ : Optional[int] = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , ) UpperCAmelCase_ : int = infer_auto_device_map(UpperCamelCase_ ) UpperCAmelCase_ : str = 1 UpperCAmelCase_ : Dict = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , load_in_abit=UpperCamelCase_ , device_map=UpperCamelCase_ , ) UpperCAmelCase_ : List[str] = Accelerator() # This should work UpperCAmelCase_ : str = accelerator.prepare(UpperCamelCase_ ) @require_cuda def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: UpperCAmelCase_ : Optional[Any] = torch.nn.Linear(10 , 10 ) UpperCAmelCase_ : Optional[Any] = torch.optim.SGD(model.parameters() , lr=0.0_1 ) UpperCAmelCase_ : Union[str, Any] = Accelerator(cpu=UpperCamelCase_ ) UpperCAmelCase_ : Optional[Any] = accelerator.prepare(UpperCamelCase_ )
357
"""simple docstring""" import json import os import tempfile import unittest import unittest.mock as mock from pathlib import Path from requests.exceptions import HTTPError from transformers.utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TRANSFORMERS_CACHE, WEIGHTS_NAME, cached_file, get_file_from_repo, has_file, ) lowerCamelCase_ = '''hf-internal-testing/tiny-random-bert''' lowerCamelCase_ = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''') lowerCamelCase_ = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6''' class UpperCamelCase_ (unittest.TestCase ): def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]: UpperCAmelCase_ : List[Any] = cached_file(lowerCAmelCase_ , lowerCAmelCase_ ) # Should have downloaded the file in here self.assertTrue(os.path.isdir(lowerCAmelCase_ ) ) # Cache should contain at least those three subfolders: for subfolder in ["blobs", "refs", "snapshots"]: self.assertTrue(os.path.isdir(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) ) ) with open(os.path.join(lowerCAmelCase_ , "refs" , "main" ) ) as f: UpperCAmelCase_ : Optional[int] = f.read() self.assertEqual(lowerCAmelCase_ , os.path.join(lowerCAmelCase_ , "snapshots" , lowerCAmelCase_ , lowerCAmelCase_ ) ) self.assertTrue(os.path.isfile(lowerCAmelCase_ ) ) # File is cached at the same place the second time. UpperCAmelCase_ : List[str] = cached_file(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ ) # Using a specific revision to test the full commit hash. UpperCAmelCase_ : int = cached_file(lowerCAmelCase_ , lowerCAmelCase_ , revision="9b8c223" ) self.assertEqual(lowerCAmelCase_ , os.path.join(lowerCAmelCase_ , "snapshots" , lowerCAmelCase_ , lowerCAmelCase_ ) ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]: with self.assertRaisesRegex(lowerCAmelCase_ , "is not a valid model identifier" ): UpperCAmelCase_ : List[Any] = cached_file("tiny-random-bert" , lowerCAmelCase_ ) with self.assertRaisesRegex(lowerCAmelCase_ , "is not a valid git identifier" ): UpperCAmelCase_ : Optional[Any] = cached_file(lowerCAmelCase_ , lowerCAmelCase_ , revision="aaaa" ) with self.assertRaisesRegex(lowerCAmelCase_ , "does not appear to have a file named" ): UpperCAmelCase_ : Union[str, Any] = cached_file(lowerCAmelCase_ , "conf" ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: with self.assertRaisesRegex(lowerCAmelCase_ , "does not appear to have a file named" ): UpperCAmelCase_ : Any = cached_file(lowerCAmelCase_ , "conf" ) with open(os.path.join(lowerCAmelCase_ , "refs" , "main" ) ) as f: UpperCAmelCase_ : List[str] = f.read() self.assertTrue(os.path.isfile(os.path.join(lowerCAmelCase_ , ".no_exist" , lowerCAmelCase_ , "conf" ) ) ) UpperCAmelCase_ : str = cached_file(lowerCAmelCase_ , "conf" , _raise_exceptions_for_missing_entries=lowerCAmelCase_ ) self.assertIsNone(lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = cached_file(lowerCAmelCase_ , "conf" , local_files_only=lowerCAmelCase_ , _raise_exceptions_for_missing_entries=lowerCAmelCase_ ) self.assertIsNone(lowerCAmelCase_ ) UpperCAmelCase_ : Any = mock.Mock() UpperCAmelCase_ : List[str] = 500 UpperCAmelCase_ : Optional[Any] = {} UpperCAmelCase_ : List[Any] = HTTPError UpperCAmelCase_ : List[str] = {} # Under the mock environment we get a 500 error when trying to reach the tokenizer. 
with mock.patch("requests.Session.request" , return_value=lowerCAmelCase_ ) as mock_head: UpperCAmelCase_ : List[Any] = cached_file(lowerCAmelCase_ , "conf" , _raise_exceptions_for_connection_errors=lowerCAmelCase_ ) self.assertIsNone(lowerCAmelCase_ ) # This check we did call the fake head request mock_head.assert_called() def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only" , lowerCAmelCase_ ) ) self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , lowerCAmelCase_ ) ) self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , lowerCAmelCase_ ) ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]: # `get_file_from_repo` returns None if the file does not exist self.assertIsNone(get_file_from_repo("bert-base-cased" , "ahah.txt" ) ) # The function raises if the repository does not exist. with self.assertRaisesRegex(lowerCAmelCase_ , "is not a valid model identifier" ): get_file_from_repo("bert-base-case" , lowerCAmelCase_ ) # The function raises if the revision does not exist. with self.assertRaisesRegex(lowerCAmelCase_ , "is not a valid git identifier" ): get_file_from_repo("bert-base-cased" , lowerCAmelCase_ , revision="ahaha" ) UpperCAmelCase_ : int = get_file_from_repo("bert-base-cased" , lowerCAmelCase_ ) # The name is the cached name which is not very easy to test, so instead we load the content. UpperCAmelCase_ : Optional[int] = json.loads(open(lowerCAmelCase_ , "r" ).read() ) self.assertEqual(config["hidden_size"] , 768 ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]: with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase_ : Union[str, Any] = Path(lowerCAmelCase_ ) / "a.txt" filename.touch() self.assertEqual(get_file_from_repo(lowerCAmelCase_ , "a.txt" ) , str(lowerCAmelCase_ ) ) self.assertIsNone(get_file_from_repo(lowerCAmelCase_ , "b.txt" ) )
253
0
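# --- illustrative aside (not a dataset row) ----------------------------------
# Minimal sketch of the save/load-state round trip that the accelerator tests
# above exercise. It assumes the public `accelerate` API (Accelerator.prepare,
# save_state, load_state); the toy model and checkpoint directory are
# illustrative only.
import tempfile

import torch
from accelerate import Accelerator

accelerator = Accelerator(cpu=True)
model = accelerator.prepare(torch.nn.Linear(2, 4))
with tempfile.TemporaryDirectory() as ckpt_dir:
    accelerator.save_state(ckpt_dir)  # checkpoint the current weights
    with torch.no_grad():
        model.weight.zero_()          # clobber the weights...
    accelerator.load_state(ckpt_dir)  # ...then restore them from the checkpoint
# ------------------------------------------------------------------------------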
"""simple docstring"""


def _SCREAMING_SNAKE_CASE(input_string: str) -> str:
    """Return the longest palindromic substring of input_string (Manacher's algorithm)."""
    max_length = 0

    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this string end after the previously explored end (that is r)?
        # if yes then update the new r to the last index of this
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string


if __name__ == "__main__":
    import doctest

    doctest.testmod()
220
"""simple docstring""" import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class a ( unittest.TestCase ): def UpperCamelCase_ ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() def UpperCamelCase_ ( self ): lowercase , lowercase = FlaxStableDiffusionPipeline.from_pretrained( 'stabilityai/stable-diffusion-2' , revision='bf16' , dtype=jnp.bfloataa , ) lowercase = 'A painting of a squirrel eating a burger' lowercase = jax.device_count() lowercase = num_samples * [prompt] lowercase = sd_pipe.prepare_inputs(_lowerCamelCase ) lowercase = replicate(_lowerCamelCase ) lowercase = shard(_lowerCamelCase ) lowercase = jax.random.PRNGKey(0 ) lowercase = jax.random.split(_lowerCamelCase , jax.device_count() ) lowercase = sd_pipe(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , num_inference_steps=2_5 , jit=_lowerCamelCase )[0] assert images.shape == (jax.device_count(), 1, 7_6_8, 7_6_8, 3) lowercase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) lowercase = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] lowercase = jnp.asarray(jax.device_get(image_slice.flatten() ) ) lowercase = jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2] ) print(F'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 def UpperCamelCase_ ( self ): lowercase = 'stabilityai/stable-diffusion-2' lowercase , lowercase = FlaxDPMSolverMultistepScheduler.from_pretrained(_lowerCamelCase , subfolder='scheduler' ) lowercase , lowercase = FlaxStableDiffusionPipeline.from_pretrained( _lowerCamelCase , scheduler=_lowerCamelCase , revision='bf16' , dtype=jnp.bfloataa , ) lowercase = scheduler_params lowercase = 'A painting of a squirrel eating a burger' lowercase = jax.device_count() lowercase = num_samples * [prompt] lowercase = sd_pipe.prepare_inputs(_lowerCamelCase ) lowercase = replicate(_lowerCamelCase ) lowercase = shard(_lowerCamelCase ) lowercase = jax.random.PRNGKey(0 ) lowercase = jax.random.split(_lowerCamelCase , jax.device_count() ) lowercase = sd_pipe(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , num_inference_steps=2_5 , jit=_lowerCamelCase )[0] assert images.shape == (jax.device_count(), 1, 7_6_8, 7_6_8, 3) lowercase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) lowercase = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] lowercase = jnp.asarray(jax.device_get(image_slice.flatten() ) ) lowercase = jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7] ) print(F'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
220
1
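# --- illustrative aside (not a dataset row) ----------------------------------
# Spot-checks for the Manacher sample above (requires its definition in scope);
# expected values follow from "longest palindromic substring".
assert _SCREAMING_SNAKE_CASE("abbbaba") == "abbba"
assert _SCREAMING_SNAKE_CASE("ababa") == "ababa"
# ------------------------------------------------------------------------------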
from __future__ import annotations


def lowerCAmelCase_(n: int) -> list[int]:
    """Return the prime factorization of n as a list of primes (with multiplicity)."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
358
import enum
import os
from hashlib import sha256
from typing import Optional

from .. import config
from .logging import get_logger


logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum doesn't match the expected checksum."""


def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file are missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The split sizes don't match the expected split sizes."""


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size):
    """Check whether `dataset_size` fits under `config.IN_MEMORY_MAX_SIZE`."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
273
0
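# --- illustrative aside (not a dataset row) ----------------------------------
# Spot-checks for the factorization sample above (requires its definition in
# scope).
assert lowerCAmelCase_(100) == [2, 2, 5, 5]
assert lowerCAmelCase_(97) == [97]  # a prime factors as itself
assert lowerCAmelCase_(1) == []     # 1 has no prime factors
# ------------------------------------------------------------------------------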
'''simple docstring'''

import datasets


_CITATION = '''\
@InProceedings{conneau2018xnli,
  author = "Conneau, Alexis
        and Rinott, Ruty
        and Lample, Guillaume
        and Williams, Adina
        and Bowman, Samuel R.
        and Schwenk, Holger
        and Stoyanov, Veselin",
  title = "XNLI: Evaluating Cross-lingual Sentence Representations",
  booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing",
  year = "2018",
  publisher = "Association for Computational Linguistics",
  location = "Brussels, Belgium",
}
'''

_DESCRIPTION = '''\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
'''

_KWARGS_DESCRIPTION = '''
Computes XNLI score which is just simple accuracy.
Args:
    predictions: Predicted labels.
    references: Ground truth labels.
Returns:
    'accuracy': accuracy
Examples:

    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> xnli_metric = datasets.load_metric("xnli")
    >>> results = xnli_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}
'''


def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
22
demo_graph = {
    'A': ['B', 'C', 'E'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F', 'G'],
    'D': ['B'],
    'E': ['A', 'B', 'D'],
    'F': ['C'],
    'G': ['C'],
}


def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Find the shortest path between `start` and `goal` nodes with BFS."""
    # keep track of explored nodes
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Find the shortest-path distance (number of edges) between `start` and `target`."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1

    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, 'G', 'D'))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, 'G', 'D'))  # returns 4
121
0
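# --- illustrative aside (not a dataset row) ----------------------------------
# Spot-checks for the BFS sample above (requires its definitions in scope); the
# expected values are the ones stated in the sample's own comments.
assert bfs_shortest_path(demo_graph, 'G', 'D') == ['G', 'C', 'A', 'B', 'D']
assert bfs_shortest_path_distance(demo_graph, 'G', 'D') == 4
# ------------------------------------------------------------------------------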
"""simple docstring"""

import string

# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
    "E": 12.70,
    "T": 9.06,
    "A": 8.17,
    "O": 7.51,
    "I": 6.97,
    "N": 6.75,
    "S": 6.33,
    "H": 6.09,
    "R": 5.99,
    "D": 4.25,
    "L": 4.03,
    "C": 2.78,
    "U": 2.76,
    "M": 2.41,
    "W": 2.36,
    "F": 2.23,
    "G": 2.02,
    "Y": 1.97,
    "P": 1.93,
    "B": 1.29,
    "V": 0.98,
    "K": 0.77,
    "J": 0.15,
    "X": 0.15,
    "Q": 0.10,
    "Z": 0.07,
}
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    """Return the letters of `message` ordered from most to least frequent."""
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)

    freq_order = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    """Score how closely the letter frequencies of `message` match English (0-12)."""
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score


if __name__ == "__main__":
    import doctest

    doctest.testmod()
370
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = logging.get_logger(__name__) SCREAMING_SNAKE_CASE = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"} SCREAMING_SNAKE_CASE = { "vocab_file": { "facebook/nllb-200-distilled-600M": ( "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model" ), }, "tokenizer_file": { "facebook/nllb-200-distilled-600M": ( "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json" ), }, } SCREAMING_SNAKE_CASE = { "facebook/nllb-large-en-ro": 1024, "facebook/nllb-200-distilled-600M": 1024, } # fmt: off SCREAMING_SNAKE_CASE = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"] class UpperCAmelCase_ ( A_ ): lowercase__ = VOCAB_FILES_NAMES lowercase__ = 
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = ['''input_ids''', '''attention_mask'''] lowercase__ = NllbTokenizer lowercase__ = [] lowercase__ = [] def __init__( self : int , snake_case_ : int=None , snake_case_ : Any=None , snake_case_ : int="<s>" , snake_case_ : List[Any]="</s>" , snake_case_ : Optional[int]="</s>" , snake_case_ : int="<s>" , snake_case_ : str="<unk>" , snake_case_ : str="<pad>" , snake_case_ : Optional[int]="<mask>" , snake_case_ : str=None , snake_case_ : List[Any]=None , snake_case_ : Tuple=None , snake_case_ : Optional[int]=False , **snake_case_ : List[str] , ) -> Tuple: '''simple docstring''' A__ = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else mask_token A__ = legacy_behaviour super().__init__( vocab_file=snake_case_ , tokenizer_file=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , src_lang=snake_case_ , tgt_lang=snake_case_ , additional_special_tokens=snake_case_ , legacy_behaviour=snake_case_ , **snake_case_ , ) A__ = vocab_file A__ = False if not self.vocab_file else True A__ = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. _additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} ) A__ = { lang_code: self.convert_tokens_to_ids(snake_case_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } A__ = src_lang if src_lang is not None else "eng_Latn" A__ = self.convert_tokens_to_ids(self._src_lang ) A__ = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def __magic_name__ ( self : Union[str, Any] ) -> str: '''simple docstring''' return self._src_lang @src_lang.setter def __magic_name__ ( self : Optional[int] , snake_case_ : str ) -> None: '''simple docstring''' A__ = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __magic_name__ ( self : Optional[int] , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def __magic_name__ ( self : Any , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' A__ = [self.sep_token_id] A__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __magic_name__ ( self : int , snake_case_ : Tuple , snake_case_ : str , snake_case_ : Optional[str] , snake_case_ : Optional[str] , **snake_case_ : Tuple ) -> List[Any]: '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" ) A__ = src_lang A__ = self(snake_case_ , add_special_tokens=snake_case_ , return_tensors=snake_case_ , **snake_case_ ) A__ = self.convert_tokens_to_ids(snake_case_ ) A__ = tgt_lang_id return inputs def __magic_name__ ( self : int , snake_case_ : List[str] , snake_case_ : str = "eng_Latn" , snake_case_ : Optional[List[str]] = None , 
snake_case_ : str = "fra_Latn" , **snake_case_ : Dict , ) -> BatchEncoding: '''simple docstring''' A__ = src_lang A__ = tgt_lang return super().prepare_seqaseq_batch(snake_case_ , snake_case_ , **snake_case_ ) def __magic_name__ ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def __magic_name__ ( self : Tuple ) -> Dict: '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def __magic_name__ ( self : List[Any] , snake_case_ : Dict ) -> None: '''simple docstring''' A__ = self.convert_tokens_to_ids(snake_case_ ) if self.legacy_behaviour: A__ = [] A__ = [self.eos_token_id, self.cur_lang_code] else: A__ = [self.cur_lang_code] A__ = [self.eos_token_id] A__ = self.convert_ids_to_tokens(self.prefix_tokens ) A__ = self.convert_ids_to_tokens(self.suffix_tokens ) A__ = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def __magic_name__ ( self : List[Any] , snake_case_ : str ) -> None: '''simple docstring''' A__ = self.convert_tokens_to_ids(snake_case_ ) if self.legacy_behaviour: A__ = [] A__ = [self.eos_token_id, self.cur_lang_code] else: A__ = [self.cur_lang_code] A__ = [self.eos_token_id] A__ = self.convert_ids_to_tokens(self.prefix_tokens ) A__ = self.convert_ids_to_tokens(self.suffix_tokens ) A__ = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def __magic_name__ ( self : List[str] , snake_case_ : str , snake_case_ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(snake_case_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory.""" ) return A__ = os.path.join( snake_case_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ): copyfile(self.vocab_file , snake_case_ ) return (out_vocab_file,)
230
0
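The style-context sample in the record above implements NLLB's language-code handling; below is a minimal standalone sketch of the token layout that its set_src_lang_special_tokens logic produces. The token ids are hypothetical placeholders, not the real NLLB vocabulary.

# Sketch of the legacy vs. current NLLB special-token layout (ids are assumed, not real).
EOS, LANG = 2, 256047  # hypothetical ids for </s> and a language code such as eng_Latn

def build_inputs(token_ids, legacy_behaviour=False):
    if legacy_behaviour:
        prefix, suffix = [], [EOS, LANG]  # legacy: tokens ... </s> lang_code
    else:
        prefix, suffix = [LANG], [EOS]    # current: lang_code tokens ... </s>
    return prefix + token_ids + suffix

print(build_inputs([100, 101]))        # [256047, 100, 101, 2]
print(build_inputs([100, 101], True))  # [100, 101, 2, 256047]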
'''simple docstring'''
_lowercase : Optional[int] = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
_lowercase : str = ["a", "b", "c", "d", "e"]


def snake_case_ ( __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] ):
    """simple docstring"""
    lowercase_ : Optional[int] = start
    # add current to visited
    visited.append(__SCREAMING_SNAKE_CASE )
    lowercase_ : Dict = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            lowercase_ : Dict = topological_sort(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    # if all neighbors visited add current to sort
    sort.append(__SCREAMING_SNAKE_CASE )
    # if all vertices haven't been visited select a new one to visit
    if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ):
        for vertice in vertices:
            if vertice not in visited:
                lowercase_ : List[Any] = topological_sort(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    # return sort
    return sort


if __name__ == "__main__":
    _lowercase : Dict = topological_sort("a", [], [])
    print(sort)
93
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { """microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""", """microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""", } class A__ ( _lowerCamelCase): A_ : List[Any] = 'markuplm' def __init__( self , _SCREAMING_SNAKE_CASE=3_05_22 , _SCREAMING_SNAKE_CASE=7_68 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=30_72 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1E-12 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2_56 , _SCREAMING_SNAKE_CASE=10_24 , _SCREAMING_SNAKE_CASE=2_16 , _SCREAMING_SNAKE_CASE=10_01 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=50 , _SCREAMING_SNAKE_CASE="absolute" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ): super().__init__( pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) __lowerCAmelCase : Union[str, Any] = vocab_size __lowerCAmelCase : Any = hidden_size __lowerCAmelCase : List[Any] = num_hidden_layers __lowerCAmelCase : Tuple = num_attention_heads __lowerCAmelCase : Union[str, Any] = hidden_act __lowerCAmelCase : List[Any] = intermediate_size __lowerCAmelCase : List[str] = hidden_dropout_prob __lowerCAmelCase : List[str] = attention_probs_dropout_prob __lowerCAmelCase : Optional[int] = max_position_embeddings __lowerCAmelCase : int = type_vocab_size __lowerCAmelCase : Tuple = initializer_range __lowerCAmelCase : int = layer_norm_eps __lowerCAmelCase : List[str] = position_embedding_type __lowerCAmelCase : List[Any] = use_cache __lowerCAmelCase : Optional[Any] = classifier_dropout # additional properties __lowerCAmelCase : Optional[int] = max_depth __lowerCAmelCase : List[str] = max_xpath_tag_unit_embeddings __lowerCAmelCase : Optional[Any] = max_xpath_subs_unit_embeddings __lowerCAmelCase : Any = tag_pad_id __lowerCAmelCase : Union[str, Any] = subs_pad_id __lowerCAmelCase : int = xpath_unit_hidden_size
86
0
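For reference, a readable, runnable restatement of the DFS topological sort in the sample above, using the same toy graph; the names here are illustrative and not part of the dataset record.

# Post-order DFS topological sort over the sample's toy graph.
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}

def topo_sort(node, visited, order):
    visited.add(node)
    for neighbor in edges[node]:
        if neighbor not in visited:
            topo_sort(neighbor, visited, order)
    order.append(node)  # appended only after all descendants are placed
    return order

print(topo_sort("a", set(), []))  # ['c', 'd', 'e', 'b', 'a']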
from collections.abc import Callable


def A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
    '''simple docstring'''
    UpperCAmelCase_ = a
    UpperCAmelCase_ = b
    if function(__UpperCAmelCase ) == 0:  # one of the a or b is a root for the function
        return a
    elif function(__UpperCAmelCase ) == 0:
        return b
    elif (
        function(__UpperCAmelCase ) * function(__UpperCAmelCase ) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError('''could not find root in given interval.''' )
    else:
        UpperCAmelCase_ = start + (end - start) / 2.0
        while abs(start - mid ) > 10**-7:  # until precisely equals to 10^-7
            if function(__UpperCAmelCase ) == 0:
                return mid
            elif function(__UpperCAmelCase ) * function(__UpperCAmelCase ) < 0:
                UpperCAmelCase_ = mid
            else:
                UpperCAmelCase_ = mid
            UpperCAmelCase_ = start + (end - start) / 2.0
        return mid


def A ( __UpperCAmelCase ) -> List[str]:
    '''simple docstring'''
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1_000))

    import doctest

    doctest.testmod()
356
from typing import TYPE_CHECKING

from ...utils import (OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available)


UpperCamelCase_ = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase_ = ["OPT_PRETRAINED_MODEL_ARCHIVE_LIST", "OPTForCausalLM", "OPTModel", "OPTPreTrainedModel", "OPTForSequenceClassification", "OPTForQuestionAnswering"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase_ = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase_ = ["FlaxOPTForCausalLM", "FlaxOPTModel", "FlaxOPTPreTrainedModel"]


if TYPE_CHECKING:
    from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_opt import (OPT_PRETRAINED_MODEL_ARCHIVE_LIST, OPTForCausalLM, OPTForQuestionAnswering, OPTForSequenceClassification, OPTModel, OPTPreTrainedModel)

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel

else:
    import sys

    UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
344
0
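The code sample in the record above is the bisection method; here is a self-contained sketch with clear names that finds a root of the same f(x) = x**3 - 2*x - 5. It is an illustrative rewrite, not the dataset's own text.

# Bisection root finding: halve the bracketing interval while the sign change persists.
def f(x: float) -> float:
    return x**3 - 2 * x - 5

def bisection(func, start: float, end: float, tol: float = 1e-7) -> float:
    if func(start) * func(end) > 0:  # no sign change means the method does not apply
        raise ValueError("could not find root in given interval.")
    while end - start > tol:
        mid = (start + end) / 2.0
        if func(start) * func(mid) <= 0:
            end = mid    # root lies in the left half
        else:
            start = mid  # root lies in the right half
    return (start + end) / 2.0

print(bisection(f, 1, 1_000))  # ~2.0945515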
'''simple docstring'''
UpperCamelCase__: List[str] = [0, 2, 4, 6, 8]
UpperCamelCase__: Any = [1, 3, 5, 7, 9]


def snake_case_ ( _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : list[int] , _lowerCAmelCase : int ) -> int:
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1 , -1 , -1 ):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1
    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        UpperCAmelCase : Union[str, Any] = 0
        for digit in range(10 ):
            UpperCAmelCase : Optional[Any] = digit
            result += reversible_numbers(
                0 , (remainder + 2 * digit) // 10 , _lowerCAmelCase , _lowerCAmelCase )
        return result
    UpperCAmelCase : str = 0
    for digita in range(10 ):
        UpperCAmelCase : List[Any] = digita
        if (remainder + digita) % 2 == 0:
            UpperCAmelCase : int = ODD_DIGITS
        else:
            UpperCAmelCase : Any = EVEN_DIGITS
        for digita in other_parity_digits:
            UpperCAmelCase : List[str] = digita
            result += reversible_numbers(
                remaining_length - 2 , (remainder + digita + digita) // 10 , _lowerCAmelCase , _lowerCAmelCase , )
    return result


def snake_case_ ( _lowerCAmelCase : int = 9 ) -> int:
    UpperCAmelCase : Union[str, Any] = 0
    for length in range(1 , max_power + 1 ):
        result += reversible_numbers(_lowerCAmelCase , 0 , [0] * length , _lowerCAmelCase )
    return result


if __name__ == "__main__":
    print(F"{solution() = }")
23
import json import multiprocessing import os import re from collections import defaultdict import torch from accelerate import Accelerator from accelerate.utils import set_seed from arguments import HumanEvalArguments from datasets import load_dataset, load_metric from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from tqdm import tqdm import transformers from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList lowerCAmelCase : List[Any] = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif'] class _A ( __magic_name__): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=1 ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = tokenizer SCREAMING_SNAKE_CASE_ : List[str] = dataset SCREAMING_SNAKE_CASE_ : List[Any] = len(_SCREAMING_SNAKE_CASE ) if n_tasks is None else n_tasks SCREAMING_SNAKE_CASE_ : Optional[int] = n_copies def __iter__( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = [] for task in range(self.n_tasks ): # without strip, the model generate commented codes ... prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() ) SCREAMING_SNAKE_CASE_ : Tuple = self.tokenizer(_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) for task in range(self.n_tasks ): for _ in range(self.n_copies ): yield { "ids": outputs.input_ids[task], "task_id": task, "input_len": outputs.attention_mask[task].sum(), } class _A ( __magic_name__): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = start_length SCREAMING_SNAKE_CASE_ : Any = eof_strings SCREAMING_SNAKE_CASE_ : Tuple = tokenizer def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = self.tokenizer.batch_decode(input_ids[:, self.start_length :] ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = [] for decoded_generation in decoded_generations: done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) ) return all(_SCREAMING_SNAKE_CASE ) def A_ ( a ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = re.split('(%s)' % '|'.join(a ) , a ) # last string should be "" return "".join(string_list[:-2] ) def A_ ( a , a , a , a , a , a=2_0 , **a ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = defaultdict(a ) # dict of list of generated tokens for step, batch in tqdm(enumerate(a ) ): with torch.no_grad(): SCREAMING_SNAKE_CASE_ : Optional[Any] = batch['ids'].shape[-1] SCREAMING_SNAKE_CASE_ : str = accelerator.unwrap_model(a ).generate( input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=a , **a ) # each task is generated batch_size times SCREAMING_SNAKE_CASE_ : Union[str, Any] = batch['task_id'].repeat(a ) SCREAMING_SNAKE_CASE_ : Optional[int] = accelerator.pad_across_processes( a , dim=1 , pad_index=tokenizer.pad_token_id ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = accelerator.gather((generated_tokens, generated_tasks) ) SCREAMING_SNAKE_CASE_ : int = generated_tokens.cpu().numpy() SCREAMING_SNAKE_CASE_ : List[Any] = generated_tasks.cpu().numpy() for task, generated_tokens in zip(a , a ): gen_token_dict[task].append(a ) SCREAMING_SNAKE_CASE_ : str = [[] for _ in range(a )] for task, generated_tokens in 
gen_token_dict.items(): for s in generated_tokens: SCREAMING_SNAKE_CASE_ : Dict = tokenizer.decode(a , skip_special_tokens=a , clean_up_tokenization_spaces=a ) code_gens[task].append(remove_last_block(a ) ) return code_gens def A_ ( ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = HfArgumentParser(a ) SCREAMING_SNAKE_CASE_ : Tuple = parser.parse_args() transformers.logging.set_verbosity_error() # enables code execution in code_eval metric SCREAMING_SNAKE_CASE_ : Optional[int] = args.HF_ALLOW_CODE_EVAL # make sure tokenizer plays nice with multiprocessing SCREAMING_SNAKE_CASE_ : List[str] = 'false' if args.num_workers is None: SCREAMING_SNAKE_CASE_ : Union[str, Any] = multiprocessing.cpu_count() # Use dataset load to feed to accelerate SCREAMING_SNAKE_CASE_ : Dict = Accelerator() set_seed(args.seed , device_specific=a ) # Load model and tokenizer SCREAMING_SNAKE_CASE_ : str = AutoTokenizer.from_pretrained(args.model_ckpt ) SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer.eos_token SCREAMING_SNAKE_CASE_ : Any = AutoModelForCausalLM.from_pretrained(args.model_ckpt ) # Generation settings SCREAMING_SNAKE_CASE_ : Any = { 'do_sample': args.do_sample, 'temperature': args.temperature, 'max_new_tokens': args.max_new_tokens, 'top_p': args.top_p, 'top_k': args.top_k, 'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , a , a )] ), } # Load evaluation dataset and metric SCREAMING_SNAKE_CASE_ : List[str] = load_dataset('openai_humaneval' ) SCREAMING_SNAKE_CASE_ : str = load_metric('code_eval' ) SCREAMING_SNAKE_CASE_ : Tuple = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] ) SCREAMING_SNAKE_CASE_ : Any = args.n_samples // args.batch_size SCREAMING_SNAKE_CASE_ : int = TokenizedDataset(a , human_eval['test'] , n_copies=a , n_tasks=a ) # do not confuse args.batch_size, which is actually the num_return_sequences SCREAMING_SNAKE_CASE_ : Tuple = DataLoader(a , batch_size=1 ) # Run a quick test to see if code evaluation is enabled try: SCREAMING_SNAKE_CASE_ : Union[str, Any] = code_eval_metric.compute(references=[''] , predictions=[['']] ) except ValueError as exception: print( 'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`' ' flag to enable code evaluation.' ) raise exception SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = accelerator.prepare(a , a ) SCREAMING_SNAKE_CASE_ : List[str] = complete_code( a , a , a , a , n_tasks=a , batch_size=args.batch_size , **a , ) if accelerator.is_main_process: SCREAMING_SNAKE_CASE_ : str = [] for task in tqdm(range(a ) ): SCREAMING_SNAKE_CASE_ : str = human_eval['test'][task]['test'] SCREAMING_SNAKE_CASE_ : int = f"check({human_eval['test'][task]['entry_point']})" references.append('\n' + test_func + '\n' + entry_point ) # Evaluate completions with "code_eval" metric SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = code_eval_metric.compute( references=a , predictions=a , num_workers=args.num_workers ) print(f"Results: {pass_at_k}" ) # Save results to json file with open(args.output_file , 'w' ) as fp: json.dump(a , a ) # For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing # https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script if __name__ == "__main__": main()
253
0
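The digit-DP sample above counts "reversible numbers" (Project Euler 145: n + reverse(n) has only odd digits). A brute-force cross-check for small lengths, useful for validating that logic; this checker is illustrative, not from the record.

# Brute-force count of reversible numbers below 1000.
def is_reversible(n: int) -> bool:
    if n % 10 == 0:  # a trailing zero would become a leading zero after reversal
        return False
    s = n + int(str(n)[::-1])
    return all(int(d) % 2 == 1 for d in str(s))

print(sum(is_reversible(n) for n in range(1, 1_000)))  # 120, matching the Euler 145 statement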
import argparse import requests import torch from PIL import Image from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor def __UpperCAmelCase ( __a : int ) -> Union[str, Any]: """simple docstring""" _a : Any = SwinConfig(image_size=192 ) if "base" in model_name: _a : Optional[int] = 6 _a : int = 128 _a : str = (2, 2, 18, 2) _a : Tuple = (4, 8, 16, 32) elif "large" in model_name: _a : Union[str, Any] = 12 _a : List[Any] = 192 _a : str = (2, 2, 18, 2) _a : Optional[Any] = (6, 12, 24, 48) else: raise ValueError('''Model not supported, only supports base and large variants''' ) _a : List[str] = window_size _a : str = embed_dim _a : Union[str, Any] = depths _a : Optional[Any] = num_heads return config def __UpperCAmelCase ( __a : Tuple ) -> Dict: """simple docstring""" if "encoder.mask_token" in name: _a : int = name.replace('''encoder.mask_token''' ,'''embeddings.mask_token''' ) if "encoder.patch_embed.proj" in name: _a : List[str] = name.replace('''encoder.patch_embed.proj''' ,'''embeddings.patch_embeddings.projection''' ) if "encoder.patch_embed.norm" in name: _a : Tuple = name.replace('''encoder.patch_embed.norm''' ,'''embeddings.norm''' ) if "attn.proj" in name: _a : List[str] = name.replace('''attn.proj''' ,'''attention.output.dense''' ) if "attn" in name: _a : int = name.replace('''attn''' ,'''attention.self''' ) if "norm1" in name: _a : List[str] = name.replace('''norm1''' ,'''layernorm_before''' ) if "norm2" in name: _a : int = name.replace('''norm2''' ,'''layernorm_after''' ) if "mlp.fc1" in name: _a : Optional[Any] = name.replace('''mlp.fc1''' ,'''intermediate.dense''' ) if "mlp.fc2" in name: _a : int = name.replace('''mlp.fc2''' ,'''output.dense''' ) if name == "encoder.norm.weight": _a : Dict = '''layernorm.weight''' if name == "encoder.norm.bias": _a : Optional[Any] = '''layernorm.bias''' if "decoder" in name: pass else: _a : List[Any] = '''swin.''' + name return name def __UpperCAmelCase ( __a : Dict ,__a : Dict ) -> List[str]: """simple docstring""" for key in orig_state_dict.copy().keys(): _a : Tuple = orig_state_dict.pop(__a ) if "attn_mask" in key: pass elif "qkv" in key: _a : List[str] = key.split('''.''' ) _a : Dict = int(key_split[2] ) _a : Any = int(key_split[4] ) _a : Tuple = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: _a : str = val[:dim, :] _a : List[Any] = val[ dim : dim * 2, : ] _a : Tuple = val[-dim:, :] else: _a : Optional[Any] = val[ :dim ] _a : List[Any] = val[ dim : dim * 2 ] _a : str = val[ -dim: ] else: _a : Dict = val return orig_state_dict def __UpperCAmelCase ( __a : Any ,__a : Optional[Any] ,__a : Union[str, Any] ,__a : Any ) -> List[Any]: """simple docstring""" _a : Any = torch.load(__a ,map_location='''cpu''' )['''model'''] _a : Optional[Any] = get_swin_config(__a ) _a : int = SwinForMaskedImageModeling(__a ) model.eval() _a : List[str] = convert_state_dict(__a ,__a ) model.load_state_dict(__a ) _a : Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg''' _a : Tuple = ViTImageProcessor(size={'''height''': 192, '''width''': 192} ) _a : Any = Image.open(requests.get(__a ,stream=__a ).raw ) _a : str = image_processor(images=__a ,return_tensors='''pt''' ) with torch.no_grad(): _a : List[Any] = model(**__a ).logits print(outputs.keys() ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(__a ) print(F"""Saving image processor to 
{pytorch_dump_folder_path}""" ) image_processor.save_pretrained(__a ) if push_to_hub: print(F"""Pushing model and image processor for {model_name} to hub""" ) model.push_to_hub(F"""microsoft/{model_name}""" ) image_processor.push_to_hub(F"""microsoft/{model_name}""" ) if __name__ == "__main__": a__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''swin-base-simmim-window6-192''', type=str, choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''], help='''Name of the Swin SimMIM model you\'d like to convert.''', ) parser.add_argument( '''--checkpoint_path''', default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''', type=str, help='''Path to the original PyTorch checkpoint (.pth file).''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) a__ = parser.parse_args() convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
360
import numpy as np


def __UpperCAmelCase ( __a : np.ndarray ,__a : np.ndarray ,__a : float = 1E-12 ,__a : int = 100 ,) -> tuple[float, np.ndarray]:
    """simple docstring"""
    assert np.shape(__a )[0] == np.shape(__a )[1]  # Ensure proper dimensionality.
    assert np.shape(__a )[0] == np.shape(__a )[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(__a ) == np.iscomplexobj(__a )
    _a : List[str] = np.iscomplexobj(__a )
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(__a ,input_matrix.conj().T )
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    _a : List[str] = False
    _a : List[str] = 0
    _a : Tuple = 0
    _a : str = 1E12
    while not convergence:
        # Multiply matrix by the vector.
        _a : str = np.dot(__a ,__a )
        # Normalize the resulting output vector.
        _a : List[Any] = w / np.linalg.norm(__a )
        # Find Rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        _a : Dict = vector.conj().T if is_complex else vector.T
        _a : Tuple = np.dot(__a ,np.dot(__a ,__a ) )
        # Check convergence.
        _a : List[str] = np.abs(lambda_ - lambda_previous ) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            _a : Dict = True
        _a : str = lambda_
    if is_complex:
        _a : Tuple = np.real(lambda_ )
    return lambda_, vector


def __UpperCAmelCase ( ) -> None:
    """simple docstring"""
    _a : List[str] = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
    _a : int = np.array([41, 4, 20] )
    _a : Optional[Any] = real_input_matrix.astype(np.complexaaa )
    _a : int = np.triu(1j * complex_input_matrix ,1 )
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    _a : Union[str, Any] = np.array([41, 4, 20] ).astype(np.complexaaa )
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            _a : Optional[int] = real_input_matrix
            _a : Union[str, Any] = real_vector
        elif problem_type == "complex":
            _a : str = complex_input_matrix
            _a : str = complex_vector
        # Our implementation.
        _a , _a : Optional[Any] = power_iteration(__a ,__a )
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or Hermitian matrices).
        _a , _a : List[str] = np.linalg.eigh(__a )
        # Last eigenvalue is the maximum one.
        _a : Tuple = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        _a : List[Any] = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max ) <= 1E-6
        # Take absolute values element wise of each eigenvector
        # as they are only unique up to a minus sign.
        assert np.linalg.norm(np.abs(__a ) - np.abs(__a ) ) <= 1E-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
15
0
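The style-context sample above is power iteration; a compact runnable sketch of the same idea on the sample's test matrix, with descriptive names chosen here for clarity.

# Power iteration: repeatedly apply A and normalize to approximate the dominant eigenpair.
import numpy as np

def power_iteration(A: np.ndarray, v: np.ndarray, iters: int = 100):
    for _ in range(iters):
        w = A @ v
        v = w / np.linalg.norm(w)
    eigenvalue = v @ A @ v  # Rayleigh quotient of the normalized vector
    return eigenvalue, v

A = np.array([[41.0, 4.0, 20.0], [4.0, 26.0, 30.0], [20.0, 30.0, 50.0]])
value, vector = power_iteration(A, np.array([41.0, 4.0, 20.0]))
print(value)  # close to the largest eigenvalue from np.linalg.eigh(A)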
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) lowerCAmelCase_ = { "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"], "tokenization_perceiver": ["PerceiverTokenizer"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ["PerceiverFeatureExtractor"] lowerCAmelCase_ = ["PerceiverImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ "PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST", "PerceiverForImageClassificationConvProcessing", "PerceiverForImageClassificationFourier", "PerceiverForImageClassificationLearned", "PerceiverForMaskedLM", "PerceiverForMultimodalAutoencoding", "PerceiverForOpticalFlow", "PerceiverForSequenceClassification", "PerceiverLayer", "PerceiverModel", "PerceiverPreTrainedModel", ] if TYPE_CHECKING: from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig from .tokenization_perceiver import PerceiverTokenizer try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_perceiver import PerceiverFeatureExtractor from .image_processing_perceiver import PerceiverImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_perceiver import ( PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST, PerceiverForImageClassificationConvProcessing, PerceiverForImageClassificationFourier, PerceiverForImageClassificationLearned, PerceiverForMaskedLM, PerceiverForMultimodalAutoencoding, PerceiverForOpticalFlow, PerceiverForSequenceClassification, PerceiverLayer, PerceiverModel, PerceiverPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
16
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow


if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class A_ (unittest.TestCase ):
    @slow
    def _lowercase ( self ):
        '''simple docstring'''
        UpperCAmelCase = XLMRobertaModel.from_pretrained('''xlm-roberta-base''' )
        UpperCAmelCase = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
        # The dog is cute and lives in the garden house
        UpperCAmelCase = torch.Size((1, 1_2, 7_6_8) )  # batch_size, sequence_length, embedding_vector_dim
        UpperCAmelCase = torch.tensor(
            [[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            UpperCAmelCase = model(_A )['''last_hidden_state'''].detach()
        self.assertEqual(output.shape , _A )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , _A , atol=1E-3 ) )

    @slow
    def _lowercase ( self ):
        '''simple docstring'''
        UpperCAmelCase = XLMRobertaModel.from_pretrained('''xlm-roberta-large''' )
        UpperCAmelCase = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
        # The dog is cute and lives in the garden house
        UpperCAmelCase = torch.Size((1, 1_2, 1_0_2_4) )  # batch_size, sequence_length, embedding_vector_dim
        UpperCAmelCase = torch.tensor(
            [[-0.06_99, -0.03_18, 0.07_05, -0.12_41, 0.09_99, -0.05_20, 0.10_04, -0.18_38, -0.47_04, 0.14_37, 0.08_21, 0.01_26]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            UpperCAmelCase = model(_A )['''last_hidden_state'''].detach()
        self.assertEqual(output.shape , _A )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , _A , atol=1E-3 ) )
273
0
import re
from pathlib import Path
from unittest import TestCase

import pytest


@pytest.mark.integration
class __A ( a ):
    def _snake_case ( self , UpperCAmelCase_ ):
        with open(UpperCAmelCase_ , encoding="""utf-8""" ) as input_file:
            lowerCamelCase =re.compile(r"""(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)""" )
            lowerCamelCase =input_file.read()
            lowerCamelCase =regexp.search(UpperCAmelCase_ )
            return match

    def _snake_case ( self , UpperCAmelCase_ ):
        with open(UpperCAmelCase_ , encoding="""utf-8""" ) as input_file:
            lowerCamelCase =re.compile(r"""#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()""" , re.DOTALL )
            lowerCamelCase =input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            lowerCamelCase =regexp.finditer(UpperCAmelCase_ )
            lowerCamelCase =[match for match in matches if match is not None and match.group(1 ) is not None]
            return matches[0] if matches else None

    def _snake_case ( self ):
        lowerCamelCase =Path("""./datasets""" )
        lowerCamelCase =list(dataset_paths.absolute().glob("""**/*.py""" ) )
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(UpperCAmelCase_ ) ):
                raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""" )

    def _snake_case ( self ):
        lowerCamelCase =Path("""./datasets""" )
        lowerCamelCase =list(dataset_paths.absolute().glob("""**/*.py""" ) )
        for dataset in dataset_files:
            if self._no_print_statements(str(UpperCAmelCase_ ) ):
                raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
262
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List

from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql  # noqa F401
from .text import text


def _lowercase ( _UpperCAmelCase ) -> str:
    lowerCamelCase =[]
    for line in lines:
        lowerCamelCase =re.sub(r"""#.*""" , """""" , _UpperCAmelCase )  # remove comments
        if line:
            filtered_lines.append(_UpperCAmelCase )
    lowerCamelCase ="""\n""".join(_UpperCAmelCase )
    # Make a hash from all this code
    lowerCamelCase =full_str.encode("""utf-8""" )
    return shaaaa(_UpperCAmelCase ).hexdigest()


# get importable module names and hash for caching
UpperCAmelCase__ : str ={
    '''csv''': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    '''json''': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    '''pandas''': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    '''parquet''': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    '''arrow''': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    '''text''': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    '''imagefolder''': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    '''audiofolder''': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data files extensions
UpperCAmelCase__ : Tuple ={
    '''.csv''': ('''csv''', {}),
    '''.tsv''': ('''csv''', {'''sep''': '''\t'''}),
    '''.json''': ('''json''', {}),
    '''.jsonl''': ('''json''', {}),
    '''.parquet''': ('''parquet''', {}),
    '''.arrow''': ('''arrow''', {}),
    '''.txt''': ('''text''', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})

UpperCAmelCase__ : Optional[Any] ={'''imagefolder''', '''audiofolder'''}

# Used to filter data files based on extensions given a module name
UpperCAmelCase__ : Dict[str, List[str]] ={}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append('''.zip''')
_MODULE_TO_EXTENSIONS["audiofolder"].append('''.zip''')
262
1
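Both samples in the record above normalize Python source before matching or hashing it. A minimal sketch of the cache-key hashing from the second sample, assuming the obfuscated name shaaaa stands for hashlib's sha256; the helper name is illustrative.

# Strip comments and blank lines, then hash what remains as a cache key.
import re
from hashlib import sha256

def hash_python_lines(lines):
    filtered = [re.sub(r"#.*", "", line) for line in lines]  # drop comments
    filtered = [line for line in filtered if line]           # drop empty lines
    return sha256("\n".join(filtered).encode("utf-8")).hexdigest()

print(hash_python_lines(["x = 1  # comment", "", "print(x)"]))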
"""simple docstring""" # Note: if you intend to run this script make sure you look under scripts/fsmt/ # to locate the appropriate script to do the work correctly. There is a set of scripts to: # - download and prepare data and run the conversion script # - perform eval to get the best hparam into the config # - generate model_cards - useful if you have multiple models from the same paper import argparse import json import os import re from collections import OrderedDict from os.path import basename, dirname import fairseq import torch from fairseq import hub_utils from fairseq.data.dictionary import Dictionary from transformers import FSMTConfig, FSMTForConditionalGeneration from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() _a : Union[str, Any] = 2 # based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping` # values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults: # # * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users) # * `early_stopping`: `False` consistently scored better # * `length_penalty` varied, so will assign the best one depending on the model _a : Union[str, Any] = { # fairseq: 'wmt19-ru-en': {'length_penalty': 1.1}, 'wmt19-en-ru': {'length_penalty': 1.15}, 'wmt19-en-de': {'length_penalty': 1.0}, 'wmt19-de-en': {'length_penalty': 1.1}, # allenai: 'wmt16-en-de-dist-12-1': {'length_penalty': 0.6}, 'wmt16-en-de-dist-6-1': {'length_penalty': 0.6}, 'wmt16-en-de-12-1': {'length_penalty': 0.8}, 'wmt19-de-en-6-6-base': {'length_penalty': 0.6}, 'wmt19-de-en-6-6-big': {'length_penalty': 0.6}, } # this remaps the different models to their organization names _a : Tuple = {} for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: _a : Union[str, Any] = 'facebook' for m in [ "wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1", "wmt19-de-en-6-6-base", "wmt19-de-en-6-6-big", ]: _a : Union[str, Any] = 'allenai' def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[int] ) -> Dict: _lowerCAmelCase : List[Any] = dict((re.sub(r"""@@$""" ,"""""" ,__lowerCAmelCase ), v) if k.endswith("""@@""" ) else (re.sub(r"""$""" ,"""</w>""" ,__lowerCAmelCase ), v) for k, v in d.items() ) _lowerCAmelCase : List[str] = '''<s> <pad> </s> <unk>'''.split() # restore the special tokens for k in keep_keys: del da[f"{k}</w>"] _lowerCAmelCase : List[str] = d[k] # restore return da def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ,_lowerCamelCase : Any ) -> Optional[int]: assert os.path.exists(__lowerCAmelCase ) os.makedirs(__lowerCAmelCase ,exist_ok=__lowerCAmelCase ) print(f"Writing results to {pytorch_dump_folder_path}" ) # handle various types of models _lowerCAmelCase : int = basename(__lowerCAmelCase ) _lowerCAmelCase : List[str] = dirname(__lowerCAmelCase ) _lowerCAmelCase : Optional[int] = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel _lowerCAmelCase : Tuple = cls.hub_models() _lowerCAmelCase : Dict = {'''bpe''': '''fastbpe''', '''tokenizer''': '''moses'''} _lowerCAmelCase : List[Any] = '''.''' # note: since the model dump is old, fairseq has upgraded its model some # time later, and it does a whole lot of rewrites and splits on the saved # weights, therefore we can't use torch.load() directly on the model file. 
# see: upgrade_state_dict(state_dict) in fairseq_model.py print(f"using checkpoint {checkpoint_file}" ) _lowerCAmelCase : Dict = hub_utils.from_pretrained( __lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,archive_map=__lowerCAmelCase ,**__lowerCAmelCase ) _lowerCAmelCase : Tuple = vars(chkpt["""args"""]["""model"""] ) _lowerCAmelCase : Union[str, Any] = args['''source_lang'''] _lowerCAmelCase : Optional[int] = args['''target_lang'''] _lowerCAmelCase : Optional[Any] = dirname(__lowerCAmelCase ) _lowerCAmelCase : int = basename(__lowerCAmelCase ) # dicts _lowerCAmelCase : List[str] = os.path.join(__lowerCAmelCase ,f"dict.{src_lang}.txt" ) _lowerCAmelCase : Tuple = os.path.join(__lowerCAmelCase ,f"dict.{tgt_lang}.txt" ) _lowerCAmelCase : Union[str, Any] = Dictionary.load(__lowerCAmelCase ) _lowerCAmelCase : List[str] = rewrite_dict_keys(src_dict.indices ) _lowerCAmelCase : List[Any] = len(__lowerCAmelCase ) _lowerCAmelCase : List[str] = os.path.join(__lowerCAmelCase ,"""vocab-src.json""" ) print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records" ) with open(__lowerCAmelCase ,"""w""" ,encoding="""utf-8""" ) as f: f.write(json.dumps(__lowerCAmelCase ,ensure_ascii=__lowerCAmelCase ,indent=__lowerCAmelCase ) ) # detect whether this is a do_lower_case situation, which can be derived by checking whether we # have at least one uppercase letter in the source vocab _lowerCAmelCase : List[Any] = True for k in src_vocab.keys(): if not k.islower(): _lowerCAmelCase : Any = False break _lowerCAmelCase : Optional[Any] = Dictionary.load(__lowerCAmelCase ) _lowerCAmelCase : int = rewrite_dict_keys(tgt_dict.indices ) _lowerCAmelCase : Any = len(__lowerCAmelCase ) _lowerCAmelCase : Dict = os.path.join(__lowerCAmelCase ,"""vocab-tgt.json""" ) print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records" ) with open(__lowerCAmelCase ,"""w""" ,encoding="""utf-8""" ) as f: f.write(json.dumps(__lowerCAmelCase ,ensure_ascii=__lowerCAmelCase ,indent=__lowerCAmelCase ) ) # merges_file (bpecodes) _lowerCAmelCase : Tuple = os.path.join(__lowerCAmelCase ,VOCAB_FILES_NAMES["""merges_file"""] ) for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code" _lowerCAmelCase : Dict = os.path.join(__lowerCAmelCase ,__lowerCAmelCase ) if os.path.exists(__lowerCAmelCase ): break with open(__lowerCAmelCase ,encoding="""utf-8""" ) as fin: _lowerCAmelCase : Union[str, Any] = fin.read() _lowerCAmelCase : str = re.sub(r""" \d+$""" ,"""""" ,__lowerCAmelCase ,0 ,re.M ) # remove frequency number print(f"Generating {merges_file}" ) with open(__lowerCAmelCase ,"""w""" ,encoding="""utf-8""" ) as fout: fout.write(__lowerCAmelCase ) # model config _lowerCAmelCase : List[Any] = os.path.join(__lowerCAmelCase ,"""config.json""" ) # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe - # may have to modify the tokenizer if a different type is used by a future model assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}" assert args["tokenizer"] == "moses", f"need to extend tokenizer to support bpe={args['tokenizer']}" _lowerCAmelCase : Tuple = { '''architectures''': ['''FSMTForConditionalGeneration'''], '''model_type''': '''fsmt''', '''activation_dropout''': args['''activation_dropout'''], '''activation_function''': '''relu''', '''attention_dropout''': args['''attention_dropout'''], '''d_model''': args['''decoder_embed_dim'''], '''dropout''': args['''dropout'''], '''init_std''': 0.02, '''max_position_embeddings''': 
args['''max_source_positions'''], '''num_hidden_layers''': args['''encoder_layers'''], '''src_vocab_size''': src_vocab_size, '''tgt_vocab_size''': tgt_vocab_size, '''langs''': [src_lang, tgt_lang], '''encoder_attention_heads''': args['''encoder_attention_heads'''], '''encoder_ffn_dim''': args['''encoder_ffn_embed_dim'''], '''encoder_layerdrop''': args['''encoder_layerdrop'''], '''encoder_layers''': args['''encoder_layers'''], '''decoder_attention_heads''': args['''decoder_attention_heads'''], '''decoder_ffn_dim''': args['''decoder_ffn_embed_dim'''], '''decoder_layerdrop''': args['''decoder_layerdrop'''], '''decoder_layers''': args['''decoder_layers'''], '''bos_token_id''': 0, '''pad_token_id''': 1, '''eos_token_id''': 2, '''is_encoder_decoder''': True, '''scale_embedding''': not args['''no_scale_embedding'''], '''tie_word_embeddings''': args['''share_all_embeddings'''], } # good hparam defaults to start with _lowerCAmelCase : Any = 5 _lowerCAmelCase : Optional[Any] = False if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]: _lowerCAmelCase : Optional[Any] = best_score_hparams[model_dir]['''length_penalty'''] else: _lowerCAmelCase : Optional[Any] = 1.0 print(f"Generating {fsmt_model_config_file}" ) with open(__lowerCAmelCase ,"""w""" ,encoding="""utf-8""" ) as f: f.write(json.dumps(__lowerCAmelCase ,ensure_ascii=__lowerCAmelCase ,indent=__lowerCAmelCase ) ) # tokenizer config _lowerCAmelCase : Tuple = os.path.join(__lowerCAmelCase ,__lowerCAmelCase ) _lowerCAmelCase : Optional[Any] = { '''langs''': [src_lang, tgt_lang], '''model_max_length''': 1024, '''do_lower_case''': do_lower_case, } print(f"Generating {fsmt_tokenizer_config_file}" ) with open(__lowerCAmelCase ,"""w""" ,encoding="""utf-8""" ) as f: f.write(json.dumps(__lowerCAmelCase ,ensure_ascii=__lowerCAmelCase ,indent=__lowerCAmelCase ) ) # model _lowerCAmelCase : Dict = chkpt['''models'''][0] _lowerCAmelCase : int = model.state_dict() # rename keys to start with 'model.' _lowerCAmelCase : str = OrderedDict(("""model.""" + k, v) for k, v in model_state_dict.items() ) # remove unneeded keys _lowerCAmelCase : List[str] = [ '''model.model''', '''model.encoder.version''', '''model.decoder.version''', '''model.encoder_embed_tokens.weight''', '''model.decoder_embed_tokens.weight''', '''model.encoder.embed_positions._float_tensor''', '''model.decoder.embed_positions._float_tensor''', ] for k in ignore_keys: model_state_dict.pop(__lowerCAmelCase ,__lowerCAmelCase ) _lowerCAmelCase : str = FSMTConfig.from_pretrained(__lowerCAmelCase ) _lowerCAmelCase : List[Any] = FSMTForConditionalGeneration(__lowerCAmelCase ) # check that it loads ok model_new.load_state_dict(__lowerCAmelCase ,strict=__lowerCAmelCase ) # save _lowerCAmelCase : Optional[Any] = os.path.join(__lowerCAmelCase ,__lowerCAmelCase ) print(f"Generating {pytorch_weights_dump_path}" ) torch.save(__lowerCAmelCase ,__lowerCAmelCase ) print("""Conversion is done!""" ) print("""\nLast step is to upload the files to s3""" ) print(f"cd {data_root}" ) print(f"transformers-cli upload {model_dir}" ) if __name__ == "__main__": _a : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--fsmt_checkpoint_path', default=None, type=str, required=True, help=( 'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,' ' bpecodes, etc.' ), ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' 
) _a : Union[str, Any] = parser.parse_args() convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
44
import os import unittest from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, BertTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class a ( __lowerCamelCase , unittest.TestCase ): __lowerCAmelCase : int = BertTokenizer __lowerCAmelCase : int = BertTokenizerFast __lowerCAmelCase : int = True __lowerCAmelCase : List[Any] = True __lowerCAmelCase : Any = filter_non_english def __lowerCamelCase ( self :str ): super().setUp() snake_case__ : str = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] snake_case__ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def __lowerCamelCase ( self :List[str] ,__lowercase :str ): snake_case__ : List[Any] = '''UNwant\u00E9d,running''' snake_case__ : str = '''unwanted, running''' return input_text, output_text def __lowerCamelCase ( self :Optional[int] ): snake_case__ : str = self.tokenizer_class(self.vocab_file ) snake_case__ : str = tokenizer.tokenize('''UNwant\u00E9d,running''' ) self.assertListEqual(__lowercase ,['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ) ,[9, 6, 7, 1_2, 1_0, 1_1] ) def __lowerCamelCase ( self :List[str] ): if not self.test_rust_tokenizer: return snake_case__ : str = self.get_tokenizer() snake_case__ : Dict = self.get_rust_tokenizer() snake_case__ : List[Any] = '''UNwant\u00E9d,running''' snake_case__ : Dict = tokenizer.tokenize(__lowercase ) snake_case__ : int = rust_tokenizer.tokenize(__lowercase ) self.assertListEqual(__lowercase ,__lowercase ) snake_case__ : List[str] = tokenizer.encode(__lowercase ,add_special_tokens=__lowercase ) snake_case__ : List[str] = rust_tokenizer.encode(__lowercase ,add_special_tokens=__lowercase ) self.assertListEqual(__lowercase ,__lowercase ) snake_case__ : Optional[Any] = self.get_rust_tokenizer() snake_case__ : List[Any] = tokenizer.encode(__lowercase ) snake_case__ : Dict = rust_tokenizer.encode(__lowercase ) self.assertListEqual(__lowercase ,__lowercase ) # With lower casing snake_case__ : List[Any] = self.get_tokenizer(do_lower_case=__lowercase ) snake_case__ : Optional[int] = self.get_rust_tokenizer(do_lower_case=__lowercase ) snake_case__ : int = '''UNwant\u00E9d,running''' snake_case__ : List[str] = tokenizer.tokenize(__lowercase ) snake_case__ : Union[str, Any] = rust_tokenizer.tokenize(__lowercase ) self.assertListEqual(__lowercase ,__lowercase ) snake_case__ : int = tokenizer.encode(__lowercase ,add_special_tokens=__lowercase ) snake_case__ : List[str] = rust_tokenizer.encode(__lowercase ,add_special_tokens=__lowercase ) self.assertListEqual(__lowercase ,__lowercase ) snake_case__ : Tuple = self.get_rust_tokenizer() snake_case__ : int = tokenizer.encode(__lowercase ) snake_case__ : Dict = rust_tokenizer.encode(__lowercase ) self.assertListEqual(__lowercase ,__lowercase ) def __lowerCamelCase ( self :Tuple ): snake_case__ : Optional[Any] = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) 
,['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] ) def __lowerCamelCase ( self :Any ): snake_case__ : Dict = BasicTokenizer(do_lower_case=__lowercase ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) ,['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) ,['''hello'''] ) def __lowerCamelCase ( self :List[str] ): snake_case__ : Optional[int] = BasicTokenizer(do_lower_case=__lowercase ,strip_accents=__lowercase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) ,['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) ,['''h\u00E9llo'''] ) def __lowerCamelCase ( self :str ): snake_case__ : Union[str, Any] = BasicTokenizer(do_lower_case=__lowercase ,strip_accents=__lowercase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) ,['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) ,['''hello'''] ) def __lowerCamelCase ( self :Optional[Any] ): snake_case__ : Union[str, Any] = BasicTokenizer(do_lower_case=__lowercase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) ,['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) ,['''hello'''] ) def __lowerCamelCase ( self :Optional[Any] ): snake_case__ : str = BasicTokenizer(do_lower_case=__lowercase ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) ,['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def __lowerCamelCase ( self :Tuple ): snake_case__ : Any = BasicTokenizer(do_lower_case=__lowercase ,strip_accents=__lowercase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) ,['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def __lowerCamelCase ( self :int ): snake_case__ : str = BasicTokenizer(do_lower_case=__lowercase ,strip_accents=__lowercase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) ,['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def __lowerCamelCase ( self :Tuple ): snake_case__ : List[Any] = BasicTokenizer(do_lower_case=__lowercase ,never_split=['''[UNK]'''] ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? 
[UNK]''' ) ,['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] ) def __lowerCamelCase ( self :Dict ): snake_case__ : List[str] = BasicTokenizer() snake_case__ : Any = '''a\n\'ll !!to?\'d of, can\'t.''' snake_case__ : Optional[int] = ['''a''', '''\'''', '''ll''', '''!''', '''!''', '''to''', '''?''', '''\'''', '''d''', '''of''', ''',''', '''can''', '''\'''', '''t''', '''.'''] self.assertListEqual(tokenizer.tokenize(__lowercase ) ,__lowercase ) def __lowerCamelCase ( self :List[str] ): snake_case__ : Any = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing'''] snake_case__ : Optional[Any] = {} for i, token in enumerate(__lowercase ): snake_case__ : Dict = i snake_case__ : int = WordpieceTokenizer(vocab=__lowercase ,unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) ,[] ) self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) ,['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) ,['''[UNK]''', '''runn''', '''##ing'''] ) def __lowerCamelCase ( self :List[Any] ): self.assertTrue(_is_whitespace(''' ''' ) ) self.assertTrue(_is_whitespace('''\t''' ) ) self.assertTrue(_is_whitespace('''\r''' ) ) self.assertTrue(_is_whitespace('''\n''' ) ) self.assertTrue(_is_whitespace('''\u00A0''' ) ) self.assertFalse(_is_whitespace('''A''' ) ) self.assertFalse(_is_whitespace('''-''' ) ) def __lowerCamelCase ( self :Union[str, Any] ): self.assertTrue(_is_control('''\u0005''' ) ) self.assertFalse(_is_control('''A''' ) ) self.assertFalse(_is_control(''' ''' ) ) self.assertFalse(_is_control('''\t''' ) ) self.assertFalse(_is_control('''\r''' ) ) def __lowerCamelCase ( self :str ): self.assertTrue(_is_punctuation('''-''' ) ) self.assertTrue(_is_punctuation('''$''' ) ) self.assertTrue(_is_punctuation('''`''' ) ) self.assertTrue(_is_punctuation('''.''' ) ) self.assertFalse(_is_punctuation('''A''' ) ) self.assertFalse(_is_punctuation(''' ''' ) ) def __lowerCamelCase ( self :List[str] ): snake_case__ : str = self.get_tokenizer() snake_case__ : List[str] = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(__lowercase ) for t in ['''Test''', '''\xad''', '''test''']] ,[['''[UNK]'''], [], ['''[UNK]''']] ) self.assertListEqual( [rust_tokenizer.tokenize(__lowercase ) for t in ['''Test''', '''\xad''', '''test''']] ,[['''[UNK]'''], [], ['''[UNK]''']] ) @slow def __lowerCamelCase ( self :List[str] ): snake_case__ : List[str] = self.tokenizer_class.from_pretrained('''bert-base-uncased''' ) snake_case__ : Union[str, Any] = tokenizer.encode('''sequence builders''' ,add_special_tokens=__lowercase ) snake_case__ : str = tokenizer.encode('''multi-sequence build''' ,add_special_tokens=__lowercase ) snake_case__ : Tuple = tokenizer.build_inputs_with_special_tokens(__lowercase ) snake_case__ : int = tokenizer.build_inputs_with_special_tokens(__lowercase ,__lowercase ) assert encoded_sentence == [1_0_1] + text + [1_0_2] assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2] def __lowerCamelCase ( self :List[Any] ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): snake_case__ : str = self.rust_tokenizer_class.from_pretrained(__lowercase ,**__lowercase ) snake_case__ : List[str] = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence.""" 
snake_case__ : List[Any] = tokenizer_r.encode_plus( __lowercase ,return_attention_mask=__lowercase ,return_token_type_ids=__lowercase ,return_offsets_mapping=__lowercase ,add_special_tokens=__lowercase ,) snake_case__ : List[str] = tokenizer_r.do_lower_case if hasattr(__lowercase ,'''do_lower_case''' ) else False snake_case__ : Optional[int] = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''A'''), ((1, 2), ''','''), ((3, 5), '''na'''), ((5, 6), '''##ï'''), ((6, 8), '''##ve'''), ((9, 1_5), tokenizer_r.mask_token), ((1_6, 2_1), '''Allen'''), ((2_1, 2_3), '''##NL'''), ((2_3, 2_4), '''##P'''), ((2_5, 3_3), '''sentence'''), ((3_3, 3_4), '''.'''), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''a'''), ((1, 2), ''','''), ((3, 8), '''naive'''), ((9, 1_5), tokenizer_r.mask_token), ((1_6, 2_1), '''allen'''), ((2_1, 2_3), '''##nl'''), ((2_3, 2_4), '''##p'''), ((2_5, 3_3), '''sentence'''), ((3_3, 3_4), '''.'''), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] ,tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) ) self.assertEqual([e[0] for e in expected_results] ,tokens['''offset_mapping'''] ) def __lowerCamelCase ( self :List[str] ): snake_case__ : str = ['''的''', '''人''', '''有'''] snake_case__ : Optional[int] = ''''''.join(__lowercase ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): snake_case__ : Optional[Any] = True snake_case__ : int = self.tokenizer_class.from_pretrained(__lowercase ,**__lowercase ) snake_case__ : Tuple = self.rust_tokenizer_class.from_pretrained(__lowercase ,**__lowercase ) snake_case__ : Union[str, Any] = tokenizer_p.encode(__lowercase ,add_special_tokens=__lowercase ) snake_case__ : List[str] = tokenizer_r.encode(__lowercase ,add_special_tokens=__lowercase ) snake_case__ : List[str] = tokenizer_r.convert_ids_to_tokens(__lowercase ) snake_case__ : Optional[Any] = tokenizer_p.convert_ids_to_tokens(__lowercase ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(__lowercase ,__lowercase ) self.assertListEqual(__lowercase ,__lowercase ) snake_case__ : str = False snake_case__ : Dict = self.rust_tokenizer_class.from_pretrained(__lowercase ,**__lowercase ) snake_case__ : int = self.tokenizer_class.from_pretrained(__lowercase ,**__lowercase ) snake_case__ : int = tokenizer_r.encode(__lowercase ,add_special_tokens=__lowercase ) snake_case__ : Tuple = tokenizer_p.encode(__lowercase ,add_special_tokens=__lowercase ) snake_case__ : List[Any] = tokenizer_r.convert_ids_to_tokens(__lowercase ) snake_case__ : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(__lowercase ) # it is expected that only the first Chinese character is not preceded by "##". snake_case__ : Optional[int] = [ F"""##{token}""" if idx != 0 else token for idx, token in enumerate(__lowercase ) ] self.assertListEqual(__lowercase ,__lowercase ) self.assertListEqual(__lowercase ,__lowercase )
230
0
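The row above ends with assertions against BERT's greedy longest-match WordPiece algorithm. A minimal, self-contained sketch of that behavior follows; the import path and constructor signature are the ones the test itself exercises, and the toy vocabulary is illustrative:

from transformers.models.bert.tokenization_bert import WordpieceTokenizer

# Continuation pieces carry the "##" prefix; any word with an unmatchable piece collapses to [UNK].
vocab = {tok: i for i, tok in enumerate(
    ["[UNK]", "want", "##want", "##ed", "un", "runn", "##ing"])}
tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

print(tokenizer.tokenize("unwanted running"))   # ['un', '##want', '##ed', 'runn', '##ing']
print(tokenizer.tokenize("unwantedX running"))  # ['[UNK]', 'runn', '##ing']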
"""simple docstring""" import json from typing import Iterator, List, Union from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers from tokenizers.implementations.base_tokenizer import BaseTokenizer from tokenizers.models import Unigram from tokenizers.processors import TemplateProcessing class snake_case ( _UpperCamelCase ): def __init__( self : List[Any] , UpperCamelCase__ : str = "▁" , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[str, AddedToken] = "<unk>" , UpperCamelCase__ : Union[str, AddedToken] = "</s>" , UpperCamelCase__ : Union[str, AddedToken] = "<pad>" , )-> int: '''simple docstring''' __lowerCAmelCase: Union[str, Any] = { 'pad': {'id': 0, 'token': pad_token}, 'eos': {'id': 1, 'token': eos_token}, 'unk': {'id': 2, 'token': unk_token}, } __lowerCAmelCase: Optional[Any] = [None] * len(self.special_tokens) for token_dict in self.special_tokens.values(): __lowerCAmelCase: Optional[Any] = token_dict['token'] __lowerCAmelCase: Optional[int] = Tokenizer(Unigram()) __lowerCAmelCase: List[Any] = normalizers.Sequence( [ normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(" {2,}") , " "), normalizers.Lowercase(), ]) __lowerCAmelCase: Optional[int] = pre_tokenizers.Sequence( [ pre_tokenizers.Metaspace(replacement=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase), pre_tokenizers.Digits(individual_digits=_UpperCAmelCase), pre_tokenizers.Punctuation(), ]) __lowerCAmelCase: int = decoders.Metaspace(replacement=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase) __lowerCAmelCase: Optional[Any] = TemplateProcessing( single=f"$A {self.special_tokens['eos']['token']}" , special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])] , ) __lowerCAmelCase: str = { 'model': 'SentencePieceUnigram', 'replacement': replacement, 'add_prefix_space': add_prefix_space, } super().__init__(_UpperCAmelCase , _UpperCAmelCase) def lowercase_ ( self : str , UpperCamelCase__ : Union[str, List[str]] , UpperCamelCase__ : int = 8_0_0_0 , UpperCamelCase__ : bool = True , )-> int: '''simple docstring''' __lowerCAmelCase: int = trainers.UnigramTrainer( vocab_size=_UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=_UpperCAmelCase , ) if isinstance(_UpperCAmelCase , _UpperCAmelCase): __lowerCAmelCase: Union[str, Any] = [files] self._tokenizer.train(_UpperCAmelCase , trainer=_UpperCAmelCase) self.add_unk_id() def lowercase_ ( self : Optional[Any] , UpperCamelCase__ : Union[Iterator[str], Iterator[Iterator[str]]] , UpperCamelCase__ : int = 8_0_0_0 , UpperCamelCase__ : bool = True , )-> Tuple: '''simple docstring''' __lowerCAmelCase: Tuple = trainers.UnigramTrainer( vocab_size=_UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=_UpperCAmelCase , ) self._tokenizer.train_from_iterator(_UpperCAmelCase , trainer=_UpperCAmelCase) self.add_unk_id() def lowercase_ ( self : Any)-> Optional[Any]: '''simple docstring''' __lowerCAmelCase: Optional[Any] = json.loads(self._tokenizer.to_str()) __lowerCAmelCase: Any = self.special_tokens['unk']['id'] __lowerCAmelCase: int = Tokenizer.from_str(json.dumps(_UpperCAmelCase))
369
"""simple docstring""" import gc import inspect import unittest import torch from parameterized import parameterized from diffusers import PriorTransformer from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin enable_full_determinism() class snake_case ( __snake_case, unittest.TestCase ): SCREAMING_SNAKE_CASE_ : Tuple = PriorTransformer SCREAMING_SNAKE_CASE_ : List[str] = """hidden_states""" @property def lowercase_ ( self : Dict)-> str: '''simple docstring''' __lowerCAmelCase: str = 4 __lowerCAmelCase: int = 8 __lowerCAmelCase: int = 7 __lowerCAmelCase: str = floats_tensor((batch_size, embedding_dim)).to(UpperCamelCase__) __lowerCAmelCase: Optional[Any] = floats_tensor((batch_size, embedding_dim)).to(UpperCamelCase__) __lowerCAmelCase: Any = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(UpperCamelCase__) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def lowercase_ ( self : Optional[int] , UpperCamelCase__ : str=0)-> str: '''simple docstring''' torch.manual_seed(UpperCamelCase__) __lowerCAmelCase: List[Any] = 4 __lowerCAmelCase: Dict = 8 __lowerCAmelCase: int = 7 __lowerCAmelCase: List[str] = torch.randn((batch_size, embedding_dim)).to(UpperCamelCase__) __lowerCAmelCase: Tuple = torch.randn((batch_size, embedding_dim)).to(UpperCamelCase__) __lowerCAmelCase: List[Any] = torch.randn((batch_size, num_embeddings, embedding_dim)).to(UpperCamelCase__) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } @property def lowercase_ ( self : Dict)-> List[Any]: '''simple docstring''' return (4, 8) @property def lowercase_ ( self : Optional[int])-> int: '''simple docstring''' return (4, 8) def lowercase_ ( self : Optional[int])-> Tuple: '''simple docstring''' __lowerCAmelCase: str = { "num_attention_heads": 2, "attention_head_dim": 4, "num_layers": 2, "embedding_dim": 8, "num_embeddings": 7, "additional_embeddings": 4, } __lowerCAmelCase: Any = self.dummy_input return init_dict, inputs_dict def lowercase_ ( self : List[Any])-> int: '''simple docstring''' __lowerCAmelCase , __lowerCAmelCase: Optional[int] = PriorTransformer.from_pretrained( "hf-internal-testing/prior-dummy" , output_loading_info=UpperCamelCase__) self.assertIsNotNone(UpperCamelCase__) self.assertEqual(len(loading_info["missing_keys"]) , 0) model.to(UpperCamelCase__) __lowerCAmelCase: Dict = model(**self.dummy_input)[0] assert hidden_states is not None, "Make sure output is not None" def lowercase_ ( self : List[str])-> Tuple: '''simple docstring''' __lowerCAmelCase , __lowerCAmelCase: Optional[Any] = self.prepare_init_args_and_inputs_for_common() __lowerCAmelCase: Tuple = self.model_class(**UpperCamelCase__) __lowerCAmelCase: List[str] = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCAmelCase: List[Any] = [*signature.parameters.keys()] __lowerCAmelCase: Any = ["hidden_states", "timestep"] self.assertListEqual(arg_names[:2] , UpperCamelCase__) def lowercase_ ( self : Optional[int])-> List[str]: '''simple docstring''' __lowerCAmelCase: int = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy") __lowerCAmelCase: Union[str, Any] = model.to(UpperCamelCase__) if hasattr(UpperCamelCase__ , "set_default_attn_processor"): 
model.set_default_attn_processor() __lowerCAmelCase: str = self.get_dummy_seed_input() with torch.no_grad(): __lowerCAmelCase: Dict = model(**UpperCamelCase__)[0] __lowerCAmelCase: Dict = output[0, :5].flatten().cpu() print(UpperCamelCase__) # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. __lowerCAmelCase: List[str] = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239]) self.assertTrue(torch_all_close(UpperCamelCase__ , UpperCamelCase__ , rtol=1e-2)) @slow class snake_case ( unittest.TestCase ): def lowercase_ ( self : int , UpperCamelCase__ : Dict=1 , UpperCamelCase__ : str=7_6_8 , UpperCamelCase__ : int=7_7 , UpperCamelCase__ : Any=0)-> Union[str, Any]: '''simple docstring''' torch.manual_seed(UpperCamelCase__) __lowerCAmelCase: List[Any] = batch_size __lowerCAmelCase: Any = embedding_dim __lowerCAmelCase: Dict = num_embeddings __lowerCAmelCase: Dict = torch.randn((batch_size, embedding_dim)).to(UpperCamelCase__) __lowerCAmelCase: str = torch.randn((batch_size, embedding_dim)).to(UpperCamelCase__) __lowerCAmelCase: int = torch.randn((batch_size, num_embeddings, embedding_dim)).to(UpperCamelCase__) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def lowercase_ ( self : List[Any])-> Union[str, Any]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @parameterized.expand( [ # fmt: off [1_3, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]], [3_7, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]], # fmt: on ]) def lowercase_ ( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : int)-> List[Any]: '''simple docstring''' __lowerCAmelCase: List[str] = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior" , subfolder="prior") model.to(UpperCamelCase__) __lowerCAmelCase: Dict = self.get_dummy_seed_input(seed=UpperCamelCase__) with torch.no_grad(): __lowerCAmelCase: Optional[Any] = model(**UpperCamelCase__)[0] assert list(sample.shape) == [1, 7_6_8] __lowerCAmelCase: Dict = sample[0, :8].flatten().cpu() print(UpperCamelCase__) __lowerCAmelCase: Union[str, Any] = torch.tensor(UpperCamelCase__) assert torch_all_close(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3)
108
0
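The code field above wraps a `tokenizers` Unigram model in a BaseTokenizer subclass. Below is a minimal sketch of the training loop it delegates to; the trainer and training calls mirror the ones in the row, while the toy sentences, tiny vocab size, and final encode() call are illustrative:

from tokenizers import Tokenizer, trainers
from tokenizers.models import Unigram

tokenizer = Tokenizer(Unigram())
trainer = trainers.UnigramTrainer(
    vocab_size=100,                               # illustrative; the row defaults to 8000
    special_tokens=["<pad>", "</s>", "<unk>"],
    show_progress=False,
)
# train() takes file paths; train_from_iterator() takes any iterator of strings.
tokenizer.train_from_iterator(["hello world", "hello tokenizers"], trainer=trainer)
print(tokenizer.encode("hello world").tokens)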
import json import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from transformers import OneFormerImageProcessor from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput if is_vision_available(): from PIL import Image def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase="shi-labs/oneformer_demo" ) -> List[Any]: with open(hf_hub_download(_UpperCAmelCase , _UpperCAmelCase , repo_type='dataset' ) , 'r' ) as f: lowerCamelCase__ : str = json.load(_UpperCAmelCase ) lowerCamelCase__ : Tuple = {} lowerCamelCase__ : List[Any] = [] lowerCamelCase__ : str = [] for key, info in class_info.items(): lowerCamelCase__ : Union[str, Any] = info['name'] class_names.append(info['name'] ) if info["isthing"]: thing_ids.append(int(_UpperCAmelCase ) ) lowerCamelCase__ : Optional[int] = thing_ids lowerCamelCase__ : Union[str, Any] = class_names return metadata class lowerCAmelCase ( unittest.TestCase ): def __init__( self : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int]=7 , UpperCAmelCase : Union[str, Any]=3 , UpperCAmelCase : int=30 , UpperCAmelCase : List[str]=400 , UpperCAmelCase : List[Any]=None , UpperCAmelCase : Any=True , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Dict=[0.5, 0.5, 0.5] , UpperCAmelCase : List[str]=[0.5, 0.5, 0.5] , UpperCAmelCase : Optional[Any]=10 , UpperCAmelCase : Tuple=False , UpperCAmelCase : Optional[int]=255 , UpperCAmelCase : Any="shi-labs/oneformer_demo" , UpperCAmelCase : Any="ade20k_panoptic.json" , UpperCAmelCase : List[Any]=10 , ) -> Union[str, Any]: lowerCamelCase__ : Tuple = parent lowerCamelCase__ : Tuple = batch_size lowerCamelCase__ : str = num_channels lowerCamelCase__ : Union[str, Any] = min_resolution lowerCamelCase__ : int = max_resolution lowerCamelCase__ : Dict = do_resize lowerCamelCase__ : Optional[int] = {'shortest_edge': 32, 'longest_edge': 1333} if size is None else size lowerCamelCase__ : Dict = do_normalize lowerCamelCase__ : Tuple = image_mean lowerCamelCase__ : List[str] = image_std lowerCamelCase__ : Any = class_info_file lowerCamelCase__ : Any = prepare_metadata(UpperCAmelCase , UpperCAmelCase ) lowerCamelCase__ : Optional[int] = num_text lowerCamelCase__ : List[str] = repo_path # for the post_process_functions lowerCamelCase__ : Any = 2 lowerCamelCase__ : str = 10 lowerCamelCase__ : str = 10 lowerCamelCase__ : Any = 3 lowerCamelCase__ : Union[str, Any] = 4 lowerCamelCase__ : Any = num_labels lowerCamelCase__ : str = do_reduce_labels lowerCamelCase__ : str = ignore_index def A_ ( self : Union[str, Any] ) -> Any: return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "num_labels": self.num_labels, "do_reduce_labels": self.do_reduce_labels, "ignore_index": self.ignore_index, "class_info_file": self.class_info_file, "metadata": self.metadata, "num_text": self.num_text, } def A_ ( self : Optional[int] , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any]=False ) -> int: if not batched: lowerCamelCase__ : List[str] = image_inputs[0] if isinstance(UpperCAmelCase , 
Image.Image ): lowerCamelCase__ , lowerCamelCase__ : Tuple = image.size else: lowerCamelCase__ , lowerCamelCase__ : Dict = image.shape[1], image.shape[2] if w < h: lowerCamelCase__ : Dict = int(self.size['shortest_edge'] * h / w ) lowerCamelCase__ : List[Any] = self.size['shortest_edge'] elif w > h: lowerCamelCase__ : Optional[Any] = self.size['shortest_edge'] lowerCamelCase__ : str = int(self.size['shortest_edge'] * w / h ) else: lowerCamelCase__ : str = self.size['shortest_edge'] lowerCamelCase__ : Union[str, Any] = self.size['shortest_edge'] else: lowerCamelCase__ : Any = [] for image in image_inputs: lowerCamelCase__ , lowerCamelCase__ : List[str] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) lowerCamelCase__ : Optional[Any] = max(UpperCAmelCase , key=lambda UpperCAmelCase : item[0] )[0] lowerCamelCase__ : str = max(UpperCAmelCase , key=lambda UpperCAmelCase : item[1] )[1] return expected_height, expected_width def A_ ( self : Tuple ) -> Tuple: return OneFormerForUniversalSegmentationOutput( # +1 for null class class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , ) @require_torch @require_vision class lowerCAmelCase ( __UpperCamelCase, unittest.TestCase ): UpperCAmelCase__ = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None # only for test_image_processing_common.test_image_proc_to_json_string UpperCAmelCase__ = image_processing_class def A_ ( self : Any ) -> int: lowerCamelCase__ : Union[str, Any] = OneFormerImageProcessorTester(self ) @property def A_ ( self : str ) -> int: return self.image_processing_tester.prepare_image_processor_dict() def A_ ( self : int ) -> Any: lowerCamelCase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCAmelCase , 'image_mean' ) ) self.assertTrue(hasattr(UpperCAmelCase , 'image_std' ) ) self.assertTrue(hasattr(UpperCAmelCase , 'do_normalize' ) ) self.assertTrue(hasattr(UpperCAmelCase , 'do_resize' ) ) self.assertTrue(hasattr(UpperCAmelCase , 'size' ) ) self.assertTrue(hasattr(UpperCAmelCase , 'ignore_index' ) ) self.assertTrue(hasattr(UpperCAmelCase , 'class_info_file' ) ) self.assertTrue(hasattr(UpperCAmelCase , 'num_text' ) ) self.assertTrue(hasattr(UpperCAmelCase , 'repo_path' ) ) self.assertTrue(hasattr(UpperCAmelCase , 'metadata' ) ) self.assertTrue(hasattr(UpperCAmelCase , 'do_reduce_labels' ) ) def A_ ( self : str ) -> List[Any]: pass def A_ ( self : Tuple ) -> Union[str, Any]: # Initialize image_processor lowerCamelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase__ : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase , Image.Image ) # Test not batched input lowerCamelCase__ : List[str] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values lowerCamelCase__ , lowerCamelCase__ : str = self.image_processing_tester.get_expected_values(UpperCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCamelCase__ , lowerCamelCase__ : Tuple = self.image_processing_tester.get_expected_values(UpperCAmelCase , batched=UpperCAmelCase ) lowerCamelCase__ : List[str] = 
image_processor( UpperCAmelCase , ['semantic'] * len(UpperCAmelCase ) , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def A_ ( self : Tuple ) -> str: # Initialize image_processor lowerCamelCase__ : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCamelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCAmelCase , numpify=UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase , np.ndarray ) # Test not batched input lowerCamelCase__ : List[str] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values lowerCamelCase__ , lowerCamelCase__ : List[str] = self.image_processing_tester.get_expected_values(UpperCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCamelCase__ , lowerCamelCase__ : List[str] = self.image_processing_tester.get_expected_values(UpperCAmelCase , batched=UpperCAmelCase ) lowerCamelCase__ : str = image_processor( UpperCAmelCase , ['semantic'] * len(UpperCAmelCase ) , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def A_ ( self : Optional[int] ) -> Union[str, Any]: # Initialize image_processor lowerCamelCase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCamelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCAmelCase , torchify=UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase , torch.Tensor ) # Test not batched input lowerCamelCase__ : Union[str, Any] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values lowerCamelCase__ , lowerCamelCase__ : str = self.image_processing_tester.get_expected_values(UpperCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCamelCase__ , lowerCamelCase__ : int = self.image_processing_tester.get_expected_values(UpperCAmelCase , batched=UpperCAmelCase ) lowerCamelCase__ : int = image_processor( UpperCAmelCase , ['semantic'] * len(UpperCAmelCase ) , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def A_ ( self : int , UpperCAmelCase : List[str]=False , UpperCAmelCase : List[Any]=False , UpperCAmelCase : Union[str, Any]="np" ) -> str: lowerCamelCase__ : List[str] = self.image_processing_class(**self.image_processor_dict ) # prepare image and target lowerCamelCase__ : Dict = self.image_processing_tester.num_labels lowerCamelCase__ : List[str] = None lowerCamelCase__ : Optional[int] = None lowerCamelCase__ : str = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCAmelCase ) if with_segmentation_maps: lowerCamelCase__ : Tuple = num_labels if is_instance_map: lowerCamelCase__ : Dict = list(range(UpperCAmelCase ) ) * 2 lowerCamelCase__ : Optional[int] = dict(enumerate(UpperCAmelCase ) ) lowerCamelCase__ : int = [ np.random.randint(0 , 
high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs ] if segmentation_type == "pil": lowerCamelCase__ : Optional[int] = [Image.fromarray(UpperCAmelCase ) for annotation in annotations] lowerCamelCase__ : List[str] = image_processor( UpperCAmelCase , ['semantic'] * len(UpperCAmelCase ) , UpperCAmelCase , return_tensors='pt' , instance_id_to_semantic_id=UpperCAmelCase , pad_and_return_pixel_mask=UpperCAmelCase , ) return inputs def A_ ( self : str ) -> Any: pass def A_ ( self : Tuple ) -> List[Any]: def common(UpperCAmelCase : Union[str, Any]=False , UpperCAmelCase : Optional[Any]=None ): lowerCamelCase__ : Any = self.comm_get_image_processor_inputs( with_segmentation_maps=UpperCAmelCase , is_instance_map=UpperCAmelCase , segmentation_type=UpperCAmelCase ) lowerCamelCase__ : Tuple = inputs['mask_labels'] lowerCamelCase__ : Union[str, Any] = inputs['class_labels'] lowerCamelCase__ : Optional[Any] = inputs['pixel_values'] lowerCamelCase__ : List[Any] = inputs['text_inputs'] # check the batch_size for mask_label, class_label, text_input in zip(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): self.assertEqual(mask_label.shape[0] , class_label.shape[0] ) # this ensure padding has happened self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] ) self.assertEqual(len(UpperCAmelCase ) , self.image_processing_tester.num_text ) common() common(is_instance_map=UpperCAmelCase ) common(is_instance_map=UpperCAmelCase , segmentation_type='pil' ) common(is_instance_map=UpperCAmelCase , segmentation_type='pil' ) def A_ ( self : Optional[int] ) -> Any: lowerCamelCase__ : Dict = np.zeros((20, 50) ) lowerCamelCase__ : List[Any] = 1 lowerCamelCase__ : Dict = 1 lowerCamelCase__ : Optional[int] = 1 lowerCamelCase__ : Union[str, Any] = binary_mask_to_rle(UpperCAmelCase ) self.assertEqual(len(UpperCAmelCase ) , 4 ) self.assertEqual(rle[0] , 21 ) self.assertEqual(rle[1] , 45 ) def A_ ( self : Union[str, Any] ) -> str: lowerCamelCase__ : str = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , ) lowerCamelCase__ : Tuple = self.image_processing_tester.get_fake_oneformer_outputs() lowerCamelCase__ : Any = fature_extractor.post_process_semantic_segmentation(UpperCAmelCase ) self.assertEqual(len(UpperCAmelCase ) , self.image_processing_tester.batch_size ) self.assertEqual( segmentation[0].shape , ( self.image_processing_tester.height, self.image_processing_tester.width, ) , ) lowerCamelCase__ : Any = [(1, 4) for i in range(self.image_processing_tester.batch_size )] lowerCamelCase__ : Dict = fature_extractor.post_process_semantic_segmentation(UpperCAmelCase , target_sizes=UpperCAmelCase ) self.assertEqual(segmentation[0].shape , target_sizes[0] ) def A_ ( self : List[str] ) -> List[str]: lowerCamelCase__ : Tuple = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , ) lowerCamelCase__ : Union[str, Any] = self.image_processing_tester.get_fake_oneformer_outputs() lowerCamelCase__ : str = image_processor.post_process_instance_segmentation(UpperCAmelCase , threshold=0 ) self.assertTrue(len(UpperCAmelCase ) == self.image_processing_tester.batch_size ) for el in segmentation: 
self.assertTrue('segmentation' in el ) self.assertTrue('segments_info' in el ) self.assertEqual(type(el['segments_info'] ) , UpperCAmelCase ) self.assertEqual( el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) ) def A_ ( self : Any ) -> Union[str, Any]: lowerCamelCase__ : int = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , ) lowerCamelCase__ : int = self.image_processing_tester.get_fake_oneformer_outputs() lowerCamelCase__ : Tuple = image_processor.post_process_panoptic_segmentation(UpperCAmelCase , threshold=0 ) self.assertTrue(len(UpperCAmelCase ) == self.image_processing_tester.batch_size ) for el in segmentation: self.assertTrue('segmentation' in el ) self.assertTrue('segments_info' in el ) self.assertEqual(type(el['segments_info'] ) , UpperCAmelCase ) self.assertEqual( el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
50
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available UpperCamelCase__ : Optional[int] = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : int = ['YolosFeatureExtractor'] UpperCamelCase__ : int = ['YolosImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Dict = [ 'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST', 'YolosForObjectDetection', 'YolosModel', 'YolosPreTrainedModel', ] if TYPE_CHECKING: from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_yolos import YolosFeatureExtractor from .image_processing_yolos import YolosImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_yolos import ( YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST, YolosForObjectDetection, YolosModel, YolosPreTrainedModel, ) else: import sys UpperCamelCase__ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
344
0
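`binary_mask_to_rle` is only asserted against in the row above, never shown. Below is an illustrative sketch of one common run-length convention (0-based [start, length] pairs over the flattened mask), which is not necessarily the exact convention OneFormer's helper returns:

import numpy as np

def binary_mask_to_rle_sketch(mask: np.ndarray) -> list:
    # Pad with zeros so runs touching either border are closed properly.
    pixels = np.concatenate([[0], mask.flatten(), [0]])
    runs = np.where(pixels[1:] != pixels[:-1])[0]  # alternating run starts and ends
    runs[1::2] -= runs[::2]                        # convert end offsets into lengths
    return runs.tolist()

mask = np.zeros((20, 50), dtype=np.uint8)
mask[0, 21:45] = 1
print(binary_mask_to_rle_sketch(mask))  # [21, 24]: one run starting at pixel 21, length 24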
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE = StableDiffusionInpaintPipeline SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS SCREAMING_SNAKE_CASE = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess SCREAMING_SNAKE_CASE = frozenset([] ) def _SCREAMING_SNAKE_CASE ( self: Any) -> Optional[int]: """simple docstring""" torch.manual_seed(0) __lowerCAmelCase : List[Any] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_SCREAMING_SNAKE_CASE , ) __lowerCAmelCase : Optional[Any] = PNDMScheduler(skip_prk_steps=_SCREAMING_SNAKE_CASE) torch.manual_seed(0) __lowerCAmelCase : Dict = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0) __lowerCAmelCase : Tuple = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , ) __lowerCAmelCase : List[Any] = CLIPTextModel(_SCREAMING_SNAKE_CASE) __lowerCAmelCase : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") __lowerCAmelCase : Tuple = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def _SCREAMING_SNAKE_CASE ( self: str , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Union[str, Any]=0) -> Optional[int]: """simple docstring""" __lowerCAmelCase : str = floats_tensor((1, 3, 32, 32) , rng=random.Random(_SCREAMING_SNAKE_CASE)).to(_SCREAMING_SNAKE_CASE) __lowerCAmelCase : Any = image.cpu().permute(0 , 2 , 3 , 1)[0] __lowerCAmelCase : List[Any] = Image.fromarray(np.uinta(_SCREAMING_SNAKE_CASE)).convert("RGB").resize((64, 64)) __lowerCAmelCase : Union[str, Any] = Image.fromarray(np.uinta(image + 4)).convert("RGB").resize((64, 64)) if str(_SCREAMING_SNAKE_CASE).startswith("mps"): __lowerCAmelCase : Dict = torch.manual_seed(_SCREAMING_SNAKE_CASE) else: __lowerCAmelCase : Tuple = torch.Generator(device=_SCREAMING_SNAKE_CASE).manual_seed(_SCREAMING_SNAKE_CASE) __lowerCAmelCase : List[Any] = { "prompt": "A painting of a squirrel 
eating a burger", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def _SCREAMING_SNAKE_CASE ( self: Dict) -> Optional[Any]: """simple docstring""" __lowerCAmelCase : Dict = "cpu" # ensure determinism for the device-dependent torch.Generator __lowerCAmelCase : Optional[Any] = self.get_dummy_components() __lowerCAmelCase : str = StableDiffusionInpaintPipeline(**_SCREAMING_SNAKE_CASE) __lowerCAmelCase : Dict = sd_pipe.to(_SCREAMING_SNAKE_CASE) sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE) __lowerCAmelCase : List[str] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE) __lowerCAmelCase : int = sd_pipe(**_SCREAMING_SNAKE_CASE).images __lowerCAmelCase : str = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __lowerCAmelCase : str = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> Dict: """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=3e-3) @slow @require_torch_gpu class A__ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> Dict: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> List[Any]: """simple docstring""" __lowerCAmelCase : Optional[Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png") __lowerCAmelCase : List[str] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png") __lowerCAmelCase : Dict = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench.npy") __lowerCAmelCase : Optional[int] = "stabilityai/stable-diffusion-2-inpainting" __lowerCAmelCase : str = StableDiffusionInpaintPipeline.from_pretrained(_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE) pipe.to(_SCREAMING_SNAKE_CASE) pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE) pipe.enable_attention_slicing() __lowerCAmelCase : Any = "Face of a yellow cat, high resolution, sitting on a park bench" __lowerCAmelCase : Dict = torch.manual_seed(0) __lowerCAmelCase : Any = pipe( prompt=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , mask_image=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , output_type="np" , ) __lowerCAmelCase : Union[str, Any] = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 9e-3 def _SCREAMING_SNAKE_CASE ( self: Tuple) -> Tuple: """simple docstring""" __lowerCAmelCase : int = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png") __lowerCAmelCase : Dict = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png") __lowerCAmelCase : Optional[int] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench_fp16.npy") __lowerCAmelCase : List[str] = "stabilityai/stable-diffusion-2-inpainting" __lowerCAmelCase : List[Any] = StableDiffusionInpaintPipeline.from_pretrained( _SCREAMING_SNAKE_CASE , torch_dtype=torch.floataa , 
safety_checker=_SCREAMING_SNAKE_CASE , ) pipe.to(_SCREAMING_SNAKE_CASE) pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE) pipe.enable_attention_slicing() __lowerCAmelCase : Dict = "Face of a yellow cat, high resolution, sitting on a park bench" __lowerCAmelCase : List[Any] = torch.manual_seed(0) __lowerCAmelCase : str = pipe( prompt=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , mask_image=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , output_type="np" , ) __lowerCAmelCase : Optional[int] = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 5e-1 def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> List[str]: """simple docstring""" torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __lowerCAmelCase : Any = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png") __lowerCAmelCase : List[Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png") __lowerCAmelCase : Union[str, Any] = "stabilityai/stable-diffusion-2-inpainting" __lowerCAmelCase : int = PNDMScheduler.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder="scheduler") __lowerCAmelCase : Optional[int] = StableDiffusionInpaintPipeline.from_pretrained( _SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , torch_dtype=torch.floataa , ) pipe.to(_SCREAMING_SNAKE_CASE) pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() __lowerCAmelCase : str = "Face of a yellow cat, high resolution, sitting on a park bench" __lowerCAmelCase : Union[str, Any] = torch.manual_seed(0) __lowerCAmelCase : int = pipe( prompt=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , mask_image=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type="np" , ) __lowerCAmelCase : Any = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 10**9
58
"""simple docstring""" def _lowercase ( __snake_case ) -> int: if not isinstance(__snake_case ,__snake_case ): raise ValueError("Input must be an integer" ) if input_num <= 0: raise ValueError("Input must be positive" ) return sum( divisor for divisor in range(1 ,input_num // 2 + 1 ) if input_num % divisor == 0 ) if __name__ == "__main__": import doctest doctest.testmod()
58
1
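The style_context field above sums proper divisors by scanning every candidate up to n // 2, which is O(n). A standard O(sqrt(n)) refinement with the same contract follows (the function name here is mine):

import math

def sum_proper_divisors(n: int) -> int:
    if not isinstance(n, int):
        raise ValueError("Input must be an integer")
    if n <= 0:
        raise ValueError("Input must be positive")
    total = 1 if n > 1 else 0  # 1 divides everything above itself; 1 has no proper divisors
    for d in range(2, math.isqrt(n) + 1):
        if n % d == 0:
            total += d
            if d != n // d:
                total += n // d  # the paired divisor that sits above sqrt(n)
    return total

assert sum_proper_divisors(28) == 28  # perfect number: 1 + 2 + 4 + 7 + 14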
"""simple docstring""" import argparse import math import traceback import dateutil.parser as date_parser import requests def UpperCAmelCase__ ( _UpperCAmelCase ): """simple docstring""" A_ : List[Any] = {} A_ : Union[str, Any] = job['started_at'] A_ : int = job['completed_at'] A_ : Optional[Any] = date_parser.parse(a_ ) A_ : Union[str, Any] = date_parser.parse(a_ ) A_ : List[str] = round((end_datetime - start_datetime).total_seconds() / 60.0 ) A_ : Dict = start A_ : Union[str, Any] = end A_ : Union[str, Any] = duration_in_min return job_info def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase=None ): """simple docstring""" A_ : List[str] = None if token is not None: A_ : List[str] = {'Accept': 'application/vnd.github+json', 'Authorization': f"""Bearer {token}"""} A_ : Optional[int] = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100""" A_ : str = requests.get(a_ , headers=a_ ).json() A_ : Optional[Any] = {} try: job_time.update({job['name']: extract_time_from_single_job(a_ ) for job in result['jobs']} ) A_ : Any = math.ceil((result['total_count'] - 100) / 100 ) for i in range(a_ ): A_ : int = requests.get(url + f"""&page={i + 2}""" , headers=a_ ).json() job_time.update({job['name']: extract_time_from_single_job(a_ ) for job in result['jobs']} ) return job_time except Exception: print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" ) return {} if __name__ == "__main__": lowerCamelCase_ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.') lowerCamelCase_ : Optional[int] = parser.parse_args() lowerCamelCase_ : Union[str, Any] = get_job_time(args.workflow_run_id) lowerCamelCase_ : Optional[int] = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True)) for k, v in job_time.items(): print(F"{k}: {v['duration']}")
286
from typing import Dict, Optional import numpy as np import datasets SCREAMING_SNAKE_CASE :List[Any] = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n' SCREAMING_SNAKE_CASE :List[str] = '\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric("mean_iou")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. 
])}\n' SCREAMING_SNAKE_CASE :str = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}' def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ = None , a_ = False , ) -> Tuple: """simple docstring""" if label_map is not None: for old_id, new_id in label_map.items(): __A = new_id # turn into Numpy arrays __A = np.array(a_ ) __A = np.array(a_ ) if reduce_labels: __A = 2_5_5 __A = label - 1 __A = 2_5_5 __A = label != ignore_index __A = np.not_equal(a_ , a_ ) __A = pred_label[mask] __A = np.array(a_ )[mask] __A = pred_label[pred_label == label] __A = np.histogram(a_ , bins=a_ , range=(0, num_labels - 1) )[0] __A = np.histogram(a_ , bins=a_ , range=(0, num_labels - 1) )[0] __A = np.histogram(a_ , bins=a_ , range=(0, num_labels - 1) )[0] __A = area_pred_label + area_label - area_intersect return area_intersect, area_union, area_pred_label, area_label def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ = None , a_ = False , ) -> Union[str, Any]: """simple docstring""" __A = np.zeros((num_labels,) , dtype=np.floataa ) __A = np.zeros((num_labels,) , dtype=np.floataa ) __A = np.zeros((num_labels,) , dtype=np.floataa ) __A = np.zeros((num_labels,) , dtype=np.floataa ) for result, gt_seg_map in zip(a_ , a_ ): __A , __A , __A , __A = intersect_and_union( a_ , a_ , a_ , a_ , a_ , a_ ) total_area_intersect += area_intersect total_area_union += area_union total_area_pred_label += area_pred_label total_area_label += area_label return total_area_intersect, total_area_union, total_area_pred_label, total_area_label def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ = None , a_ = None , a_ = False , ) -> str: """simple docstring""" __A , __A , __A , __A = total_intersect_and_union( a_ , a_ , a_ , a_ , a_ , a_ ) # compute metrics __A = {} __A = total_area_intersect.sum() / total_area_label.sum() __A = total_area_intersect / total_area_union __A = total_area_intersect / total_area_label __A = np.nanmean(a_ ) __A = np.nanmean(a_ ) __A = all_acc __A = iou __A = acc if nan_to_num is not None: __A = {metric: np.nan_to_num(a_ , nan=a_ ) for metric, metric_value in metrics.items()} return metrics @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase ( datasets.Metric ): '''simple docstring''' def UpperCamelCase_ ( self : List[Any] ): return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( # 1st Seq - height dim, 2nd - width dim { "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16" ) ) ), "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16" ) ) ), } ) ,reference_urls=[ "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py" ] ,) def UpperCamelCase_ ( self : int ,A : Optional[Any] ,A : Optional[Any] ,A : int ,A : bool ,A : Optional[int] = None ,A : Optional[Dict[int, int]] = None ,A : bool = False ,): __A = mean_iou( results=A ,gt_seg_maps=A ,num_labels=A ,ignore_index=A ,nan_to_num=A ,label_map=A ,reduce_labels=A ,) return iou_result
15
0
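The mean-IoU metric above accumulates per-class areas with np.histogram. Below is a condensed sketch of just the per-class IoU step; it mirrors the histogram calls in the row but drops the ignore_index, label_map, and reduce_labels handling:

import numpy as np

def per_class_iou(pred: np.ndarray, gt: np.ndarray, num_labels: int) -> np.ndarray:
    hist = dict(bins=num_labels, range=(0, num_labels - 1))
    area_intersect = np.histogram(pred[pred == gt], **hist)[0]
    area_pred = np.histogram(pred, **hist)[0]
    area_gt = np.histogram(gt, **hist)[0]
    area_union = area_pred + area_gt - area_intersect
    with np.errstate(invalid="ignore"):
        return area_intersect / area_union  # NaN for classes absent from both maps

pred = np.array([[1, 2], [3, 4]])
gt = np.array([[1, 2], [3, 3]])
print(per_class_iou(pred, gt, num_labels=5))  # [nan, 1.0, 1.0, 0.5, 0.0]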
import inspect import os import unittest from pathlib import Path import torch import accelerate from accelerate.test_utils import execute_subprocess_async from accelerate.test_utils.testing import run_command class _snake_case ( unittest.TestCase ): '''simple docstring''' A__ : Tuple = inspect.getfile(accelerate.test_utils ) A__ : Dict = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_cli.py"] ) A__ : List[str] = ["accelerate", "launch"] A__ : List[Any] = Path.home() / ".cache/huggingface/accelerate" A__ : Tuple = "default_config.yaml" A__ : Tuple = config_folder / config_file A__ : int = config_folder / "_default_config.yaml" A__ : Optional[Any] = Path("tests/test_configs" ) @classmethod def A__ ( cls: int ) -> Optional[Any]: if cls.config_path.is_file(): cls.config_path.rename(cls.changed_path ) @classmethod def A__ ( cls: Any ) -> Optional[Any]: if cls.changed_path.is_file(): cls.changed_path.rename(cls.config_path ) def A__ ( self: Tuple ) -> str: UpperCAmelCase_ : str = self.base_cmd if torch.cuda.is_available() and (torch.cuda.device_count() > 1): cmd += ["--multi_gpu"] execute_subprocess_async(cmd + [self.test_file_path] ,env=os.environ.copy() ) def A__ ( self: List[Any] ) -> str: for config in sorted(self.test_config_path.glob("""**/*.yaml""" ) ): with self.subTest(config_file=lowerCamelCase_ ): execute_subprocess_async( self.base_cmd + ["""--config_file""", str(lowerCamelCase_ ), self.test_file_path] ,env=os.environ.copy() ) def A__ ( self: Union[str, Any] ) -> Tuple: execute_subprocess_async(["""accelerate""", """test"""] ,env=os.environ.copy() ) class _snake_case ( unittest.TestCase ): '''simple docstring''' A__ : Optional[int] = "test-tpu" A__ : Optional[int] = "us-central1-a" A__ : Dict = "ls" A__ : Optional[int] = ["accelerate", "tpu-config"] A__ : Optional[Any] = "cd /usr/share" A__ : Optional[Any] = "tests/test_samples/test_command_file.sh" A__ : int = "Running gcloud compute tpus tpu-vm ssh" def A__ ( self: Dict ) -> List[Any]: UpperCAmelCase_ : Optional[Any] = run_command( self.cmd + ["""--command""", self.command, """--tpu_zone""", self.tpu_zone, """--tpu_name""", self.tpu_name, """--debug"""] ,return_stdout=lowerCamelCase_ ,) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' ,lowerCamelCase_ ,) def A__ ( self: Optional[int] ) -> Optional[int]: UpperCAmelCase_ : int = run_command( self.cmd + [ """--config_file""", """tests/test_configs/0_12_0.yaml""", """--command""", self.command, """--tpu_zone""", self.tpu_zone, """--tpu_name""", self.tpu_name, """--debug""", ] ,return_stdout=lowerCamelCase_ ,) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' ,lowerCamelCase_ ,) def A__ ( self: Dict ) -> Tuple: UpperCAmelCase_ : Union[str, Any] = run_command( self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--debug"""] ,return_stdout=lowerCamelCase_ ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' ,lowerCamelCase_ ,) def A__ ( self: Optional[int] ) -> List[str]: UpperCAmelCase_ : Dict = run_command( self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--command""", self.command, """--debug"""] ,return_stdout=lowerCamelCase_ ,) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' ,lowerCamelCase_ ,) def A__ ( self: Union[str, 
Any] ) -> int: UpperCAmelCase_ : Union[str, Any] = run_command( self.cmd + [ """--config_file""", """tests/test_configs/latest.yaml""", """--command""", self.command, """--command""", """echo \"Hello World\"""", """--debug""", ] ,return_stdout=lowerCamelCase_ ,) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all''' ,lowerCamelCase_ ,) def A__ ( self: str ) -> Optional[Any]: UpperCAmelCase_ : int = run_command( self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--command_file""", self.command_file, """--debug"""] ,return_stdout=lowerCamelCase_ ,) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' ,lowerCamelCase_ ,) def A__ ( self: Dict ) -> Union[str, Any]: UpperCAmelCase_ : Optional[int] = run_command( self.cmd + [ """--config_file""", """tests/test_configs/0_12_0.yaml""", """--command_file""", self.command_file, """--tpu_zone""", self.tpu_zone, """--tpu_name""", self.tpu_name, """--debug""", ] ,return_stdout=lowerCamelCase_ ,) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' ,lowerCamelCase_ ,) def A__ ( self: Optional[int] ) -> Dict: UpperCAmelCase_ : List[str] = run_command( self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--install_accelerate""", """--debug"""] ,return_stdout=lowerCamelCase_ ,) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all''' ,lowerCamelCase_ ,) def A__ ( self: List[str] ) -> List[str]: UpperCAmelCase_ : Dict = run_command( self.cmd + [ """--config_file""", """tests/test_configs/latest.yaml""", """--install_accelerate""", """--accelerate_version""", """12.0.0""", """--debug""", ] ,return_stdout=lowerCamelCase_ ,) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all''' ,lowerCamelCase_ ,)
59
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { '''shi-labs/dinat-mini-in1k-224''': '''https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json''', # See all Dinat models at https://huggingface.co/models?filter=dinat } class _snake_case ( __snake_case , __snake_case ): '''simple docstring''' A__ : Optional[Any] = "dinat" A__ : Any = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self: Any ,lowerCamelCase_: Any=4 ,lowerCamelCase_: Union[str, Any]=3 ,lowerCamelCase_: Union[str, Any]=64 ,lowerCamelCase_: Optional[int]=[3, 4, 6, 5] ,lowerCamelCase_: int=[2, 4, 8, 16] ,lowerCamelCase_: Optional[int]=7 ,lowerCamelCase_: Dict=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] ,lowerCamelCase_: Tuple=3.0 ,lowerCamelCase_: Any=True ,lowerCamelCase_: int=0.0 ,lowerCamelCase_: Optional[Any]=0.0 ,lowerCamelCase_: Optional[int]=0.1 ,lowerCamelCase_: Optional[int]="gelu" ,lowerCamelCase_: Optional[Any]=0.0_2 ,lowerCamelCase_: List[Any]=1e-5 ,lowerCamelCase_: int=0.0 ,lowerCamelCase_: int=None ,lowerCamelCase_: str=None ,**lowerCamelCase_: Dict ,) -> Union[str, Any]: super().__init__(**lowerCamelCase_ ) UpperCAmelCase_ : List[str] = patch_size UpperCAmelCase_ : Tuple = num_channels UpperCAmelCase_ : Union[str, Any] = embed_dim UpperCAmelCase_ : int = depths UpperCAmelCase_ : List[Any] = len(lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = num_heads UpperCAmelCase_ : Tuple = kernel_size UpperCAmelCase_ : int = dilations UpperCAmelCase_ : Optional[Any] = mlp_ratio UpperCAmelCase_ : Optional[Any] = qkv_bias UpperCAmelCase_ : List[Any] = hidden_dropout_prob UpperCAmelCase_ : List[str] = attention_probs_dropout_prob UpperCAmelCase_ : Tuple = drop_path_rate UpperCAmelCase_ : List[str] = hidden_act UpperCAmelCase_ : Any = layer_norm_eps UpperCAmelCase_ : List[str] = initializer_range # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model UpperCAmelCase_ : List[Any] = int(embed_dim * 2 ** (len(lowerCamelCase_ ) - 1) ) UpperCAmelCase_ : Optional[int] = layer_scale_init_value UpperCAmelCase_ : List[Any] = ["""stem"""] + [F'''stage{idx}''' for idx in range(1 ,len(lowerCamelCase_ ) + 1 )] UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = get_aligned_output_features_output_indices( out_features=lowerCamelCase_ ,out_indices=lowerCamelCase_ ,stage_names=self.stage_names )
59
1
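The Dinat configuration in the style_context above derives hidden_size from embed_dim and the number of stages. A quick check of that relationship, assuming a transformers install that ships DinatConfig:

from transformers import DinatConfig

config = DinatConfig(embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16])
# hidden_size is the channel dim after the last stage: embed_dim * 2 ** (num_stages - 1)
assert config.hidden_size == 64 * 2 ** (len(config.depths) - 1)  # 512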