code
stringlengths
82
53.2k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
'''simple docstring''' import string def a__ ( lowerCAmelCase__ ) -> None: for key in range(len(string.ascii_uppercase ) ): UpperCAmelCase__ : int = '''''' for symbol in message: if symbol in string.ascii_uppercase: UpperCAmelCase__ : Optional[Any] = string.ascii_uppercase.find(_A ) UpperCAmelCase__ : List[str] = num - key if num < 0: UpperCAmelCase__ : Tuple = num + len(string.ascii_uppercase ) UpperCAmelCase__ : Any = translated + string.ascii_uppercase[num] else: UpperCAmelCase__ : Union[str, Any] = translated + symbol print(F"""Decryption using Key #{key}: {translated}""" ) def a__ ( ) -> None: UpperCAmelCase__ : Optional[int] = input('''Encrypted message: ''' ) UpperCAmelCase__ : Any = message.upper() decrypt(_A ) if __name__ == "__main__": import doctest doctest.testmod() main()
75
import logging import random import ray from transformers import RagConfig, RagRetriever, RagTokenizer from transformers.models.rag.retrieval_rag import CustomHFIndex UpperCAmelCase_ : List[str] = logging.getLogger(__name__) class UpperCamelCase : def __init__( self ): A__ = False def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ): if not self.initialized: A__ = RagRetriever( UpperCAmelCase__ , question_encoder_tokenizer=UpperCAmelCase__ , generator_tokenizer=UpperCAmelCase__ , index=UpperCAmelCase__ , init_retrieval=UpperCAmelCase__ , ) A__ = True def __A ( self ): self.retriever.index.init_index() def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ ): A__ , A__ = self.retriever._main_retrieve(UpperCAmelCase__ , UpperCAmelCase__ ) return doc_ids, retrieved_doc_embeds class UpperCamelCase ( _UpperCAmelCase ): def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=None ): if index is not None and index.is_initialized() and len(UpperCAmelCase__ ) > 0: raise ValueError( "When using Ray for distributed fine-tuning, " "you'll need to provide the paths instead, " "as the dataset and the index are loaded " "separately. More info in examples/rag/use_own_knowledge_dataset.py " ) super().__init__( UpperCAmelCase__ , question_encoder_tokenizer=UpperCAmelCase__ , generator_tokenizer=UpperCAmelCase__ , index=UpperCAmelCase__ , init_retrieval=UpperCAmelCase__ , ) A__ = retrieval_workers if len(self.retrieval_workers ) > 0: ray.get( [ worker.create_rag_retriever.remote(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) for worker in self.retrieval_workers ] ) def __A ( self ): logger.info("initializing retrieval" ) if len(self.retrieval_workers ) > 0: ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] ) else: # Non-distributed training. Load index into this same process. 
self.index.init_index() def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ ): if len(self.retrieval_workers ) > 0: # Select a random retrieval actor. A__ = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )] A__ , A__ = ray.get(random_worker.retrieve.remote(UpperCAmelCase__ , UpperCAmelCase__ ) ) else: A__ , A__ = self._main_retrieve(UpperCAmelCase__ , UpperCAmelCase__ ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(UpperCAmelCase__ ) @classmethod def __A ( cls , UpperCAmelCase__ , UpperCAmelCase__=None , **UpperCAmelCase__ ): return super(UpperCAmelCase__ , cls ).get_tokenizers(UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ) @classmethod def __A ( cls , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=None , **UpperCAmelCase__ ): A__ = kwargs.pop("config" , UpperCAmelCase__ ) or RagConfig.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) A__ = RagTokenizer.from_pretrained(UpperCAmelCase__ , config=UpperCAmelCase__ ) A__ = rag_tokenizer.question_encoder A__ = rag_tokenizer.generator if indexed_dataset is not None: A__ = "custom" A__ = CustomHFIndex(config.retrieval_vector_size , UpperCAmelCase__ ) else: A__ = cls._build_index(UpperCAmelCase__ ) return cls( UpperCAmelCase__ , question_encoder_tokenizer=UpperCAmelCase__ , generator_tokenizer=UpperCAmelCase__ , retrieval_workers=UpperCAmelCase__ , index=UpperCAmelCase__ , )
491
0
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available SCREAMING_SNAKE_CASE : Optional[Any] = { "configuration_efficientnet": [ "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "EfficientNetConfig", "EfficientNetOnnxConfig", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : Union[str, Any] = ["EfficientNetImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : Optional[int] = [ "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST", "EfficientNetForImageClassification", "EfficientNetModel", "EfficientNetPreTrainedModel", ] if TYPE_CHECKING: from .configuration_efficientnet import ( EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientNetConfig, EfficientNetOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientnet import EfficientNetImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientnet import ( EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST, 
EfficientNetForImageClassification, EfficientNetModel, EfficientNetPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()["__file__"], _import_structure)
354
import logging import os import sys from pathlib import Path from unittest.mock import patch from parameterized import parameterized from run_eval import run_generate from run_eval_search import run_search from transformers.testing_utils import CaptureStdout, TestCasePlus, slow from utils import ROUGE_KEYS logging.basicConfig(level=logging.DEBUG) SCREAMING_SNAKE_CASE : Optional[Any] = logging.getLogger() def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Optional[Any]: _lowercase : List[Any] = '\n'.join(lowerCamelCase_ ) Path(lowerCamelCase_ ).open('w' ).writelines(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = "patrickvonplaten/t5-tiny-random" SCREAMING_SNAKE_CASE : List[Any] = "sshleifer/bart-tiny-random" SCREAMING_SNAKE_CASE : int = "sshleifer/tiny-mbart" SCREAMING_SNAKE_CASE : Optional[int] = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks class _lowerCamelCase( _a ): def UpperCamelCase ( self, lowerCamelCase) -> Dict: """simple docstring""" _lowercase : int = Path(self.get_auto_remove_tmp_dir()) / 'utest_input.source' _lowercase : str = input_file_name.parent / 'utest_output.txt' assert not output_file_name.exists() _lowercase : str = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.'] _dump_articles(lowerCamelCase, lowerCamelCase) _lowercase : List[str] = str(Path(self.get_auto_remove_tmp_dir()) / 'scores.json') _lowercase : Optional[int] = 'translation_en_to_de' if model == T5_TINY else 'summarization' _lowercase : Any = F''' run_eval_search.py {model} {input_file_name} {output_file_name} --score_path {score_path} --task {task} --num_beams 2 --length_penalty 2.0 '''.split() with patch.object(lowerCamelCase, 'argv', lowerCamelCase): run_generate() assert Path(lowerCamelCase).exists() # os.remove(Path(output_file_name)) def UpperCamelCase ( self) -> Dict: """simple docstring""" 
self.run_eval_tester(lowerCamelCase) @parameterized.expand([BART_TINY, MBART_TINY]) @slow def UpperCamelCase ( self, lowerCamelCase) -> int: """simple docstring""" self.run_eval_tester(lowerCamelCase) @parameterized.expand([T5_TINY, MBART_TINY]) @slow def UpperCamelCase ( self, lowerCamelCase) -> List[str]: """simple docstring""" _lowercase : str = Path(self.get_auto_remove_tmp_dir()) / 'utest_input.source' _lowercase : Any = input_file_name.parent / 'utest_output.txt' assert not output_file_name.exists() _lowercase : List[str] = { 'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'], 'de': [ 'Maschinelles Lernen ist großartig, oder?', 'Ich esse gerne Bananen', 'Morgen ist wieder ein toller Tag!', ], } _lowercase : Optional[Any] = Path(self.get_auto_remove_tmp_dir()) _lowercase : Optional[Any] = str(tmp_dir / 'scores.json') _lowercase : str = str(tmp_dir / 'val.target') _dump_articles(lowerCamelCase, text['en']) _dump_articles(lowerCamelCase, text['de']) _lowercase : Tuple = 'translation_en_to_de' if model == T5_TINY else 'summarization' _lowercase : Tuple = F''' run_eval_search.py {model} {str(lowerCamelCase)} {str(lowerCamelCase)} --score_path {score_path} --reference_path {reference_path} --task {task} '''.split() testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0']) with patch.object(lowerCamelCase, 'argv', lowerCamelCase): with CaptureStdout() as cs: run_search() _lowercase : Dict = [' num_beams | length_penalty', model, 'Best score args'] _lowercase : Optional[Any] = ['Info'] if "translation" in task: expected_strings.append('bleu') else: expected_strings.extend(lowerCamelCase) for w in expected_strings: assert w in cs.out for w in un_expected_strings: assert w not in cs.out assert Path(lowerCamelCase).exists() os.remove(Path(lowerCamelCase))
354
1
import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowercase_ = logging.get_logger(__name__) def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = OrderedDict() for key, value in state_dict.items(): if key.startswith('''module.encoder''' ): __SCREAMING_SNAKE_CASE : Optional[int] = key.replace('''module.encoder''' , '''glpn.encoder''' ) if key.startswith('''module.decoder''' ): __SCREAMING_SNAKE_CASE : List[Any] = key.replace('''module.decoder''' , '''decoder.stages''' ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 __SCREAMING_SNAKE_CASE : Optional[Any] = key[key.find('''patch_embed''' ) + len('''patch_embed''' )] __SCREAMING_SNAKE_CASE : str = key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(snake_case )-1}''' ) if "norm" in key: __SCREAMING_SNAKE_CASE : Tuple = key.replace('''norm''' , '''layer_norm''' ) if "glpn.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 __SCREAMING_SNAKE_CASE : str = key[key.find('''glpn.encoder.layer_norm''' ) + len('''glpn.encoder.layer_norm''' )] __SCREAMING_SNAKE_CASE : Any = key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(snake_case )-1}''' ) if "layer_norm1" in key: __SCREAMING_SNAKE_CASE : Dict = key.replace('''layer_norm1''' , '''layer_norm_1''' ) if "layer_norm2" in key: __SCREAMING_SNAKE_CASE : List[Any] = key.replace('''layer_norm2''' , '''layer_norm_2''' ) if "block" in key: # replace for example block1 by block.0 __SCREAMING_SNAKE_CASE : List[str] = key[key.find('''block''' ) + len('''block''' )] __SCREAMING_SNAKE_CASE : Dict = key.replace(F'''block{idx}''' , F'''block.{int(snake_case )-1}''' ) if "attn.q" in key: __SCREAMING_SNAKE_CASE : Union[str, Any] = key.replace('''attn.q''' , 
'''attention.self.query''' ) if "attn.proj" in key: __SCREAMING_SNAKE_CASE : Dict = key.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in key: __SCREAMING_SNAKE_CASE : Tuple = key.replace('''attn''' , '''attention.self''' ) if "fc1" in key: __SCREAMING_SNAKE_CASE : Dict = key.replace('''fc1''' , '''dense1''' ) if "fc2" in key: __SCREAMING_SNAKE_CASE : Dict = key.replace('''fc2''' , '''dense2''' ) if "linear_pred" in key: __SCREAMING_SNAKE_CASE : int = key.replace('''linear_pred''' , '''classifier''' ) if "linear_fuse" in key: __SCREAMING_SNAKE_CASE : int = key.replace('''linear_fuse.conv''' , '''linear_fuse''' ) __SCREAMING_SNAKE_CASE : List[Any] = key.replace('''linear_fuse.bn''' , '''batch_norm''' ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 __SCREAMING_SNAKE_CASE : List[str] = key[key.find('''linear_c''' ) + len('''linear_c''' )] __SCREAMING_SNAKE_CASE : Optional[Any] = key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(snake_case )-1}''' ) if "bot_conv" in key: __SCREAMING_SNAKE_CASE : List[Any] = key.replace('''bot_conv''' , '''0.convolution''' ) if "skip_conv1" in key: __SCREAMING_SNAKE_CASE : int = key.replace('''skip_conv1''' , '''1.convolution''' ) if "skip_conv2" in key: __SCREAMING_SNAKE_CASE : Any = key.replace('''skip_conv2''' , '''2.convolution''' ) if "fusion1" in key: __SCREAMING_SNAKE_CASE : Any = key.replace('''fusion1''' , '''1.fusion''' ) if "fusion2" in key: __SCREAMING_SNAKE_CASE : Tuple = key.replace('''fusion2''' , '''2.fusion''' ) if "fusion3" in key: __SCREAMING_SNAKE_CASE : Optional[int] = key.replace('''fusion3''' , '''3.fusion''' ) if "fusion" in key and "conv" in key: __SCREAMING_SNAKE_CASE : Any = key.replace('''conv''' , '''convolutional_layer''' ) if key.startswith('''module.last_layer_depth''' ): __SCREAMING_SNAKE_CASE : Union[str, Any] = key.replace('''module.last_layer_depth''' , '''head.head''' ) __SCREAMING_SNAKE_CASE : Tuple = value return new_state_dict def a__ ( snake_case , 
snake_case ): """simple docstring""" # for each of the encoder blocks: for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) __SCREAMING_SNAKE_CASE : Optional[int] = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' ) __SCREAMING_SNAKE_CASE : Tuple = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' ) # next, add keys and values (in that order) to the state dict __SCREAMING_SNAKE_CASE : Dict = kv_weight[ : config.hidden_sizes[i], : ] __SCREAMING_SNAKE_CASE : str = kv_bias[: config.hidden_sizes[i]] __SCREAMING_SNAKE_CASE : List[str] = kv_weight[ config.hidden_sizes[i] :, : ] __SCREAMING_SNAKE_CASE : List[Any] = kv_bias[config.hidden_sizes[i] :] def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = '''http://images.cocodataset.org/val2017/000000039769.jpg''' __SCREAMING_SNAKE_CASE : List[str] = Image.open(requests.get(snake_case , stream=snake_case ).raw ) return image @torch.no_grad() def a__ ( snake_case , snake_case , snake_case=False , snake_case=None ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] ) # load image processor (only resize + rescale) __SCREAMING_SNAKE_CASE : Union[str, Any] = GLPNImageProcessor() # prepare image __SCREAMING_SNAKE_CASE : str = prepare_img() __SCREAMING_SNAKE_CASE : str = image_processor(images=snake_case , return_tensors='''pt''' ).pixel_values logger.info('''Converting model...''' ) # load original state dict __SCREAMING_SNAKE_CASE : Optional[int] = torch.load(snake_case , map_location=torch.device('''cpu''' ) ) # rename keys __SCREAMING_SNAKE_CASE : str = rename_keys(snake_case ) # key and value matrices need special treatment read_in_k_v(snake_case , snake_case ) # create HuggingFace model and load state dict __SCREAMING_SNAKE_CASE : Dict = 
GLPNForDepthEstimation(snake_case ) model.load_state_dict(snake_case ) model.eval() # forward pass __SCREAMING_SNAKE_CASE : int = model(snake_case ) __SCREAMING_SNAKE_CASE : Optional[int] = outputs.predicted_depth # verify output if model_name is not None: if "nyu" in model_name: __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor( [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] ) elif "kitti" in model_name: __SCREAMING_SNAKE_CASE : int = torch.tensor( [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] ) else: raise ValueError(F'''Unknown model name: {model_name}''' ) __SCREAMING_SNAKE_CASE : Any = torch.Size([1, 480, 640] ) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3] , snake_case , atol=1E-4 ) print('''Looks ok!''' ) # finally, push to hub if required if push_to_hub: logger.info('''Pushing model and image processor to the hub...''' ) model.push_to_hub( repo_path_or_name=Path(snake_case , snake_case ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=snake_case , ) image_processor.push_to_hub( repo_path_or_name=Path(snake_case , snake_case ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=snake_case , ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() parser.add_argument( """--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file).""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub.""" ) parser.add_argument( """--model_name""", default="""glpn-kitti""", type=str, help="""Name of the model in case you're pushing to the hub.""", ) lowercase_ = parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, 
args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
74
def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = [0 for i in range(len(snake_case ) )] # initialize interval's left pointer and right pointer __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = 0, 0 for i in range(1 , len(snake_case ) ): # case when current index is inside the interval if i <= right_pointer: __SCREAMING_SNAKE_CASE : List[Any] = min(right_pointer - i + 1 , z_result[i - left_pointer] ) __SCREAMING_SNAKE_CASE : Dict = min_edge while go_next(snake_case , snake_case , snake_case ): z_result[i] += 1 # if new index's result gives us more right interval, # we've to update left_pointer and right_pointer if i + z_result[i] - 1 > right_pointer: __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = i, i + z_result[i] - 1 return z_result def a__ ( snake_case , snake_case , snake_case ): """simple docstring""" return i + z_result[i] < len(snake_case ) and s[z_result[i]] == s[i + z_result[i]] def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = 0 # concatenate 'pattern' and 'input_str' and call z_function # with concatenated string __SCREAMING_SNAKE_CASE : str = z_function(pattern + input_str ) for val in z_result: # if value is greater then length of the pattern string # that means this index is starting position of substring # which is equal to pattern string if val >= len(snake_case ): answer += 1 return answer if __name__ == "__main__": import doctest doctest.testmod()
74
1
import os import shutil import sys import tempfile import unittest from pathlib import Path import pytest import transformers from transformers import ( BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoTokenizer, BertConfig, BertTokenizer, BertTokenizerFast, CTRLTokenizer, GPTaTokenizer, GPTaTokenizerFast, PreTrainedTokenizerFast, RobertaTokenizer, RobertaTokenizerFast, is_tokenizers_available, ) from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.auto.tokenization_auto import ( TOKENIZER_MAPPING, get_tokenizer_config, tokenizer_class_from_name, ) from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import ( DUMMY_DIFF_TOKENIZER_IDENTIFIER, DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tokenizers, slow, ) sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class a_ ( unittest.TestCase ): def lowercase__ ( self : List[str] ): __snake_case = 0 @slow def lowercase__ ( self : str ): for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x): __snake_case = AutoTokenizer.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , (BertTokenizer, BertTokenizerFast) ) self.assertGreater(len(__lowerCAmelCase ) , 0 ) for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys(): __snake_case = AutoTokenizer.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , (GPTaTokenizer, GPTaTokenizerFast) ) self.assertGreater(len(__lowerCAmelCase ) , 0 ) def lowercase__ ( self : Optional[int] ): __snake_case 
= AutoTokenizer.from_pretrained(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 1_2 ) def lowercase__ ( self : Tuple ): __snake_case = AutoTokenizer.from_pretrained(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , (RobertaTokenizer, RobertaTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 2_0 ) def lowercase__ ( self : Any ): __snake_case = AutoConfig.from_pretrained(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) # Check that tokenizer_type ≠ model_type __snake_case = AutoTokenizer.from_pretrained(__lowerCAmelCase , config=__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 1_2 ) def lowercase__ ( self : Tuple ): with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(__lowerCAmelCase , 'vocab.txt' ) ) __snake_case = AutoTokenizer.from_pretrained(__lowerCAmelCase , tokenizer_type='bert' , use_fast=__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('./tests/fixtures/vocab.json' , os.path.join(__lowerCAmelCase , 'vocab.json' ) ) shutil.copy('./tests/fixtures/merges.txt' , os.path.join(__lowerCAmelCase , 'merges.txt' ) ) __snake_case = AutoTokenizer.from_pretrained(__lowerCAmelCase , tokenizer_type='gpt2' , use_fast=__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) @require_tokenizers def lowercase__ ( self : Optional[Any] ): with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(__lowerCAmelCase , 'vocab.txt' ) ) __snake_case = AutoTokenizer.from_pretrained(__lowerCAmelCase , tokenizer_type='bert' ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) with tempfile.TemporaryDirectory() as tmp_dir: 
shutil.copy('./tests/fixtures/vocab.json' , os.path.join(__lowerCAmelCase , 'vocab.json' ) ) shutil.copy('./tests/fixtures/merges.txt' , os.path.join(__lowerCAmelCase , 'merges.txt' ) ) __snake_case = AutoTokenizer.from_pretrained(__lowerCAmelCase , tokenizer_type='gpt2' ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) def lowercase__ ( self : int ): with pytest.raises(__lowerCAmelCase ): AutoTokenizer.from_pretrained('./' , tokenizer_type='xxx' ) @require_tokenizers def lowercase__ ( self : Union[str, Any] ): for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: __snake_case = tokenizer_class.from_pretrained('wietsedv/bert-base-dutch-cased' ) self.assertIsInstance(__lowerCAmelCase , (BertTokenizer, BertTokenizerFast) ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ): self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , __lowerCAmelCase ) else: self.assertEqual(tokenizer.do_lower_case , __lowerCAmelCase ) self.assertEqual(tokenizer.model_max_length , 5_1_2 ) @require_tokenizers def lowercase__ ( self : str ): for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: with self.assertRaisesRegex( __lowerCAmelCase , 'julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier' , ): __snake_case = tokenizer_class.from_pretrained('julien-c/herlolip-not-exists' ) def lowercase__ ( self : Any ): # tests: https://github.com/huggingface/transformers/pull/13251 # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta # 2. 
models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai __snake_case = TOKENIZER_MAPPING.values() __snake_case = [] for slow_tok, fast_tok in tokenizers: if slow_tok is not None: tokenizer_names.append(slow_tok.__name__ ) if fast_tok is not None: tokenizer_names.append(fast_tok.__name__ ) for tokenizer_name in tokenizer_names: # must find the right class tokenizer_class_from_name(__lowerCAmelCase ) @require_tokenizers def lowercase__ ( self : List[str] ): self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased' , use_fast=__lowerCAmelCase ) , __lowerCAmelCase ) self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased' ) , __lowerCAmelCase ) @require_tokenizers def lowercase__ ( self : Optional[int] ): __snake_case = AutoTokenizer.from_pretrained('distilbert-base-uncased' , do_lower_case=__lowerCAmelCase ) __snake_case = 'Hello, world. How are you?' __snake_case = tokenizer.tokenize(__lowerCAmelCase ) self.assertEqual('[UNK]' , tokens[0] ) __snake_case = AutoTokenizer.from_pretrained('microsoft/mpnet-base' , do_lower_case=__lowerCAmelCase ) __snake_case = tokenizer.tokenize(__lowerCAmelCase ) self.assertEqual('[UNK]' , tokens[0] ) @require_tokenizers def lowercase__ ( self : Optional[Any] ): __snake_case = AutoTokenizer.from_pretrained('robot-test/dummy-tokenizer-fast-with-model-config' ) self.assertEqual(type(__lowerCAmelCase ) , __lowerCAmelCase ) self.assertEqual(tokenizer.model_max_length , 5_1_2 ) self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0 ) self.assertEqual(tokenizer.unk_token , '[UNK]' ) self.assertEqual(tokenizer.padding_side , 'right' ) self.assertEqual(tokenizer.truncation_side , 'right' ) def lowercase__ ( self : List[str] ): __snake_case = AutoTokenizer.from_pretrained(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , (BertTokenizer, BertTokenizerFast) ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(__lowerCAmelCase ) __snake_case = 
AutoTokenizer.from_pretrained(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , tokenizer.__class__ ) self.assertEqual(tokenizera.vocab_size , 1_2 ) def lowercase__ ( self : Tuple ): __snake_case = AutoTokenizer.from_pretrained('ctrl' ) # There is no fast CTRL so this always gives us a slow tokenizer. self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) def lowercase__ ( self : Dict ): # Check we can load the tokenizer config of an online model. __snake_case = get_tokenizer_config('bert-base-cased' ) __snake_case = config.pop('_commit_hash' , __lowerCAmelCase ) # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated. self.assertEqual(__lowerCAmelCase , {'do_lower_case': False} ) # This model does not have a tokenizer_config so we get back an empty dict. __snake_case = get_tokenizer_config(__lowerCAmelCase ) self.assertDictEqual(__lowerCAmelCase , {} ) # A tokenizer saved with `save_pretrained` always creates a tokenizer config. __snake_case = AutoTokenizer.from_pretrained(__lowerCAmelCase ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(__lowerCAmelCase ) __snake_case = get_tokenizer_config(__lowerCAmelCase ) # Check the class of the tokenizer was properly saved (note that it always saves the slow class). 
self.assertEqual(config['tokenizer_class'] , 'BertTokenizer' ) def lowercase__ ( self : List[str] ): try: AutoConfig.register('custom' , __lowerCAmelCase ) AutoTokenizer.register(__lowerCAmelCase , slow_tokenizer_class=__lowerCAmelCase ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__lowerCAmelCase ): AutoTokenizer.register(__lowerCAmelCase , slow_tokenizer_class=__lowerCAmelCase ) __snake_case = CustomTokenizer.from_pretrained(__lowerCAmelCase ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(__lowerCAmelCase ) __snake_case = AutoTokenizer.from_pretrained(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] @require_tokenizers def lowercase__ ( self : Union[str, Any] ): try: AutoConfig.register('custom' , __lowerCAmelCase ) # Can register in two steps AutoTokenizer.register(__lowerCAmelCase , slow_tokenizer_class=__lowerCAmelCase ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) ) AutoTokenizer.register(__lowerCAmelCase , fast_tokenizer_class=__lowerCAmelCase ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) del TOKENIZER_MAPPING._extra_content[CustomConfig] # Can register in one step AutoTokenizer.register( __lowerCAmelCase , slow_tokenizer_class=__lowerCAmelCase , fast_tokenizer_class=__lowerCAmelCase ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__lowerCAmelCase ): AutoTokenizer.register(__lowerCAmelCase , fast_tokenizer_class=__lowerCAmelCase ) # We pass through a bert tokenizer fast cause there is no converter slow to 
fast for our new toknizer # and that model does not have a tokenizer.json with tempfile.TemporaryDirectory() as tmp_dir: __snake_case = BertTokenizerFast.from_pretrained(__lowerCAmelCase ) bert_tokenizer.save_pretrained(__lowerCAmelCase ) __snake_case = CustomTokenizerFast.from_pretrained(__lowerCAmelCase ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(__lowerCAmelCase ) __snake_case = AutoTokenizer.from_pretrained(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) __snake_case = AutoTokenizer.from_pretrained(__lowerCAmelCase , use_fast=__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def lowercase__ ( self : Dict ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(__lowerCAmelCase ): __snake_case = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' ) # If remote code is disabled, we can't load this config. with self.assertRaises(__lowerCAmelCase ): __snake_case = AutoTokenizer.from_pretrained( 'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=__lowerCAmelCase ) __snake_case = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=__lowerCAmelCase ) self.assertTrue(tokenizer.special_attribute_present ) # Test tokenizer can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(__lowerCAmelCase ) __snake_case = AutoTokenizer.from_pretrained(__lowerCAmelCase , trust_remote_code=__lowerCAmelCase ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizerFast' ) # Test we can also load the slow version __snake_case = AutoTokenizer.from_pretrained( 'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=__lowerCAmelCase , use_fast=__lowerCAmelCase ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' ) # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(__lowerCAmelCase ) __snake_case = AutoTokenizer.from_pretrained(__lowerCAmelCase , trust_remote_code=__lowerCAmelCase , use_fast=__lowerCAmelCase ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer' ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) else: self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer' ) @require_tokenizers def lowercase__ ( self : str ): class a_ ( UpperCAmelCase__ ): lowercase_ : str = False class a_ ( UpperCAmelCase__ ): lowercase_ : Optional[Any] = NewTokenizer lowercase_ : str = False try: AutoConfig.register('custom' , __lowerCAmelCase ) AutoTokenizer.register(__lowerCAmelCase , slow_tokenizer_class=__lowerCAmelCase ) AutoTokenizer.register(__lowerCAmelCase , fast_tokenizer_class=__lowerCAmelCase ) # If remote code is not set, the default is to use local __snake_case = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' ) self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' ) self.assertFalse(tokenizer.special_attribute_present ) __snake_case 
= AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , use_fast=__lowerCAmelCase ) self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' ) self.assertFalse(tokenizer.special_attribute_present ) # If remote code is disabled, we load the local one. __snake_case = AutoTokenizer.from_pretrained( 'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=__lowerCAmelCase ) self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' ) self.assertFalse(tokenizer.special_attribute_present ) __snake_case = AutoTokenizer.from_pretrained( 'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=__lowerCAmelCase , use_fast=__lowerCAmelCase ) self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' ) self.assertFalse(tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub __snake_case = AutoTokenizer.from_pretrained( 'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=__lowerCAmelCase ) self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' ) self.assertTrue(tokenizer.special_attribute_present ) __snake_case = AutoTokenizer.from_pretrained( 'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=__lowerCAmelCase , use_fast=__lowerCAmelCase ) self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' ) self.assertTrue(tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def lowercase__ ( self : Optional[Any] ): __snake_case = AutoTokenizer.from_pretrained( 'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=__lowerCAmelCase ) self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' ) # Test we can also load the slow version __snake_case = 
AutoTokenizer.from_pretrained( 'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=__lowerCAmelCase , use_fast=__lowerCAmelCase ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' ) else: self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' ) def lowercase__ ( self : Union[str, Any] ): with self.assertRaisesRegex( __lowerCAmelCase , 'bert-base is not a local folder and is not a valid model identifier' ): __snake_case = AutoTokenizer.from_pretrained('bert-base' ) def lowercase__ ( self : str ): with self.assertRaisesRegex( __lowerCAmelCase , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ): __snake_case = AutoTokenizer.from_pretrained(__lowerCAmelCase , revision='aaaaaa' ) def lowercase__ ( self : int ): # Make sure we have cached the tokenizer. __snake_case = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' ) with RequestCounter() as counter: __snake_case = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
704
'''simple docstring'''
import itertools
import json
import os
import unittest

from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


# NOTE(review): this unit is machine-scrambled. Every local is assigned to the
# single name `__snake_case` while later statements still read the original
# names (`tokenizer`, `tokens`, `input_text`, ...) -> NameError at runtime;
# every method is named `lowercase__`, so only the last definition survives on
# the class; the base `UpperCAmelCase__` is undefined (presumably
# TokenizerTesterMixin, which is imported above -- TODO confirm against the
# upstream test_tokenization_roberta.py). Code kept byte-for-byte; comments
# only annotate intent and flag the breakage.
@require_tokenizers
class a_ ( UpperCAmelCase__ , unittest.TestCase ):
    lowercase_ : int = RobertaTokenizer
    lowercase_ : int = RobertaTokenizerFast
    lowercase_ : int = True
    lowercase_ : Dict = {'''cls_token''': '''<s>'''}

    # setUp: writes a tiny BPE vocab + merges file into self.tmpdirname.
    def lowercase__ ( self : Union[str, Any] ):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        __snake_case = [
            'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n',
            '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er',
            '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>',
        ]
        __snake_case = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
        __snake_case = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        __snake_case = {'unk_token': '<unk>'}
        __snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        __snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(__lowerCAmelCase ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(__lowerCAmelCase ) )

    # get_tokenizer: slow tokenizer from the tmp dir (`kwargs` is undefined here
    # -- scrambled from the **kwargs parameter).
    def lowercase__ ( self : Tuple , **__lowerCAmelCase : List[str] ):
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase )

    # get_rust_tokenizer: fast-tokenizer counterpart of the above.
    def lowercase__ ( self : Dict , **__lowerCAmelCase : Tuple ):
        kwargs.update(self.special_tokens_map )
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase )

    # get_input_output_texts (returns `input_text, output_text` -- both undefined).
    def lowercase__ ( self : Optional[Any] , __lowerCAmelCase : int ):
        __snake_case = 'lower newer'
        __snake_case = 'lower newer'
        return input_text, output_text

    # test_full_tokenizer: tokenize + convert_tokens_to_ids round-trip on the toy vocab.
    def lowercase__ ( self : Union[str, Any] ):
        __snake_case = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        __snake_case = 'lower newer'
        __snake_case = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
        __snake_case = tokenizer.tokenize(__lowerCAmelCase )  # , add_prefix_space=True)
        self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
        __snake_case = tokens + [tokenizer.unk_token]
        __snake_case = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , __lowerCAmelCase )

    # Golden roberta-base ids for two reference sentences.
    def lowercase__ ( self : Tuple ):
        __snake_case = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=__lowerCAmelCase ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] )
        self.assertListEqual(
            tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=__lowerCAmelCase ) ,
            [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] ,
        )

    # Slow test: build_inputs_with_special_tokens matches encode(add_special_tokens=...).
    @slow
    def lowercase__ ( self : int ):
        __snake_case = self.tokenizer_class.from_pretrained('roberta-base' )
        __snake_case = tokenizer.encode('sequence builders' , add_special_tokens=__lowerCAmelCase )
        __snake_case = tokenizer.encode('multi-sequence build' , add_special_tokens=__lowerCAmelCase )
        __snake_case = tokenizer.encode(
            'sequence builders' , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
        __snake_case = tokenizer.encode(
            'sequence builders' , 'multi-sequence build' , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
        __snake_case = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase )
        __snake_case = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase , __lowerCAmelCase )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    # test_space_encoding: add_prefix_space behaviour and spaces after special tokens.
    def lowercase__ ( self : int ):
        __snake_case = self.get_tokenizer()
        __snake_case = 'Encode this sequence.'
        __snake_case = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]

        # Testing encoder arguments
        __snake_case = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
        __snake_case = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
        __snake_case = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
        __snake_case = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
        tokenizer.add_special_tokens({'bos_token': '<s>'} )
        __snake_case = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
        __snake_case = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )

        # Testing spaces after special tokens
        __snake_case = '<mask>'
        tokenizer.add_special_tokens(
            {'mask_token': AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase )} )  # mask token has a left space
        __snake_case = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
        __snake_case = 'Encode <mask> sequence'
        __snake_case = 'Encode <mask>sequence'
        __snake_case = tokenizer.encode(__lowerCAmelCase )
        __snake_case = encoded.index(__lowerCAmelCase )
        __snake_case = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
        __snake_case = tokenizer.encode(__lowerCAmelCase )
        __snake_case = encoded.index(__lowerCAmelCase )
        __snake_case = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )

    # Intentionally empty override.
    def lowercase__ ( self : List[str] ):
        pass

    # test_mask_token: rust vs python tokenizers on 'A, <mask> AllenNLP sentence.'.
    def lowercase__ ( self : Dict ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                __snake_case = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
                __snake_case = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
                __snake_case = 'A, <mask> AllenNLP sentence.'
                __snake_case = tokenizer_r.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase )
                __snake_case = tokenizer_p.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase )

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) ,
                    sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) ,
                )
                __snake_case = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
                __snake_case = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
                self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
                self.assertSequenceEqual(
                    __lowerCAmelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
                self.assertSequenceEqual(
                    __lowerCAmelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )

    # Serialized pre-tokenizer/post-processor must carry the constructor flags.
    def lowercase__ ( self : Optional[int] ):
        for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
            __snake_case = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
            __snake_case = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            __snake_case = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
            self.assertEqual(pre_tokenizer_state['add_prefix_space'] , __lowerCAmelCase )
            self.assertEqual(post_processor_state['add_prefix_space'] , __lowerCAmelCase )
            self.assertEqual(post_processor_state['trim_offsets'] , __lowerCAmelCase )

    # Offsets matrix over (add_prefix_space, trim_offsets) combinations.
    def lowercase__ ( self : Optional[Any] ):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                __snake_case = 'hello'  # `hello` is a token in the vocabulary of `pretrained_name`
                __snake_case = F'{text_of_1_token} {text_of_1_token}'
                __snake_case = self.rust_tokenizer_class.from_pretrained(
                    __lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
                __snake_case = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCAmelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(__lowerCAmelCase ) + 1, len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
                __snake_case = self.rust_tokenizer_class.from_pretrained(
                    __lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
                __snake_case = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCAmelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(__lowerCAmelCase ) + 1, len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
                __snake_case = self.rust_tokenizer_class.from_pretrained(
                    __lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
                __snake_case = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCAmelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(__lowerCAmelCase ), len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
                __snake_case = self.rust_tokenizer_class.from_pretrained(
                    __lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
                __snake_case = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCAmelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(__lowerCAmelCase ), len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
                __snake_case = F' {text}'

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )
                __snake_case = self.rust_tokenizer_class.from_pretrained(
                    __lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
                __snake_case = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__lowerCAmelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(__lowerCAmelCase ) + 1, 1 + len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
                __snake_case = self.rust_tokenizer_class.from_pretrained(
                    __lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
                __snake_case = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCAmelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(__lowerCAmelCase ), 1 + len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
                __snake_case = self.rust_tokenizer_class.from_pretrained(
                    __lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
                __snake_case = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCAmelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(__lowerCAmelCase ), 1 + len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
427
0
"""simple docstring""" import os from datetime import datetime as dt from github import Github __magic_name__ = [ "good first issue", "feature request", "wip", ] def _lowerCamelCase ( ) -> Optional[int]: '''simple docstring''' a__ = Github(os.environ['GITHUB_TOKEN'] ) a__ = g.get_repo('huggingface/accelerate' ) a__ = repo.get_issues(state='open' ) for issue in open_issues: a__ = sorted([comment for comment in issue.get_comments()],key=lambda UpperCAmelCase__ : i.created_at,reverse=lowerCAmelCase__ ) a__ = comments[0] if len(lowerCAmelCase__ ) > 0 else None a__ = dt.utcnow() a__ = (current_time - issue.updated_at).days a__ = (current_time - issue.created_at).days if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and days_since_updated > 7 and days_since_creation >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Close issue since it has been 7 days of inactivity since bot mention. issue.edit(state='closed' ) elif ( days_since_updated > 23 and days_since_creation >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Add stale comment issue.create_comment( 'This issue has been automatically marked as stale because it has not had ' 'recent activity. If you think this still needs to be addressed ' 'please comment on this thread.\n\nPlease note that issues that do not follow the ' '[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) ' 'are likely to be ignored.' ) if __name__ == "__main__": main()
232
from __future__ import annotations import math import numpy as np from numpy.linalg import norm def __UpperCamelCase ( lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : np.ndarray ): return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(lowerCAmelCase__ , lowerCAmelCase__ ) ) ) def __UpperCamelCase ( lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : np.ndarray ): if dataset.ndim != value_array.ndim: __a : Optional[Any] = ( '''Wrong input data\'s dimensions... ''' f"dataset : {dataset.ndim}, value_array : {value_array.ndim}" ) raise ValueError(lowerCAmelCase__ ) try: if dataset.shape[1] != value_array.shape[1]: __a : Optional[int] = ( '''Wrong input data\'s shape... ''' f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}" ) raise ValueError(lowerCAmelCase__ ) except IndexError: if dataset.ndim != value_array.ndim: raise TypeError('''Wrong shape''' ) if dataset.dtype != value_array.dtype: __a : Tuple = ( '''Input data have different datatype... ''' f"dataset : {dataset.dtype}, value_array : {value_array.dtype}" ) raise TypeError(lowerCAmelCase__ ) __a : Optional[Any] = [] for value in value_array: __a : Union[str, Any] = euclidean(lowerCAmelCase__ , dataset[0] ) __a : List[Any] = dataset[0].tolist() for dataset_value in dataset[1:]: __a : List[str] = euclidean(lowerCAmelCase__ , lowerCAmelCase__ ) if dist > temp_dist: __a : List[Any] = temp_dist __a : Optional[Any] = dataset_value.tolist() answer.append([vector, dist] ) return answer def __UpperCamelCase ( lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : np.ndarray ): return np.dot(lowerCAmelCase__ , lowerCAmelCase__ ) / (norm(lowerCAmelCase__ ) * norm(lowerCAmelCase__ )) if __name__ == "__main__": import doctest doctest.testmod()
521
0
'''simple docstring'''
import unittest

import numpy as np
import torch

from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device

enable_full_determinism()


# NOTE(review): both test classes below share the scrambled name
# `lowerCamelCase__` (kept for interface compatibility), so at module level the
# second definition shadows the first. Fixed here: the dummy-UNet property was
# also named `__UpperCAmelCase`, colliding with the test method and leaving
# `self.dummy_uncond_unet` undefined; locals were all assigned to `_lowercase`
# while later lines read the intended names (`model`, `sde_ve`, ...).
class lowerCamelCase__(unittest.TestCase):
    """Fast CPU checks for ScoreSdeVePipeline on a tiny model."""

    @property
    def dummy_uncond_unet(self):
        """Tiny seeded UNet so the pipeline output is deterministic."""
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=('DownBlock2D', 'AttnDownBlock2D'),
            up_block_types=('AttnUpBlock2D', 'UpBlock2D'),
        )
        return model

    def __UpperCAmelCase(self):
        """Dict output and tuple output must match the same golden slice."""
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()
        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        # NOTE(review): original had `disable=UpperCamelCase_` (undefined);
        # upstream diffusers tests pass disable=None -- confirm.
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type='numpy', generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(
            num_inference_steps=2, output_type='numpy', generator=generator, return_dict=False
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2


@slow
@require_torch
class lowerCamelCase__(unittest.TestCase):
    """Slow integration test against the pretrained church checkpoint."""

    def __UpperCAmelCase(self):
        model_id = 'google/ncsnpp-church-256'
        model = UNetaDModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)
        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type='numpy', generator=generator).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
4
'''simple docstring'''
import unittest

import numpy as np

from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.roformer.modeling_flax_roformer import (
        FlaxRoFormerForMaskedLM,
        FlaxRoFormerForMultipleChoice,
        FlaxRoFormerForQuestionAnswering,
        FlaxRoFormerForSequenceClassification,
        FlaxRoFormerForTokenClassification,
        FlaxRoFormerModel,
    )


# NOTE(review): the scrambled original gave all three classes the one name
# `lowerCamelCase__` and duplicated every __init__ parameter name
# (a SyntaxError). Only the last class was reachable at module level, so the
# first two are renamed back to their coherent names (restoring the internal
# `FlaxRoFormerModelTester(self)` reference); the last, externally visible
# class keeps its original name and method names. Default values below are the
# literal sequence from the scrambled source.
class FlaxRoFormerModelTester(unittest.TestCase):
    """Builds a tiny RoFormer config plus random inputs for the model tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Random ids/masks plus a matching tiny RoFormerConfig."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            # NOTE(review): scrambled source had `is_decoder=UpperCamelCase_`;
            # upstream uses False -- confirm.
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict


@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    # NOTE(review): the scrambled base was the undefined name `A`;
    # FlaxModelTesterMixin is the only mixin imported for this purpose.
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('junnyu/roformer_chinese_small', from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class lowerCamelCase__(unittest.TestCase):
    """Integration check of the pretrained Chinese RoFormer MLM head."""

    @slow
    def __UpperCAmelCase(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base')
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 5_0000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1E-4))
4
1
"""simple docstring""" from ...processing_utils import ProcessorMixin class lowerCamelCase__ ( __a ): SCREAMING_SNAKE_CASE = ['''image_processor''', '''feature_extractor'''] SCREAMING_SNAKE_CASE = '''TvltImageProcessor''' SCREAMING_SNAKE_CASE = '''TvltFeatureExtractor''' def __init__( self ,A ,A ): super().__init__(image_processor=lowerCAmelCase__ ,feature_extractor=lowerCAmelCase__ ) UpperCAmelCase = image_processor UpperCAmelCase = feature_extractor def __call__( self ,A=None ,A=None ,A=None ,A=None ,A=False ,A=False ,*A ,**A ,): if images is None and audio is None: raise ValueError("""You need to specify either an `images` or `audio` input to process.""" ) UpperCAmelCase = None if images is not None: UpperCAmelCase = self.image_processor(lowerCAmelCase__ ,mask_pixel=lowerCAmelCase__ ,*lowerCAmelCase__ ,**lowerCAmelCase__ ) if images_mixed is not None: UpperCAmelCase = self.image_processor(lowerCAmelCase__ ,is_mixed=lowerCAmelCase__ ,*lowerCAmelCase__ ,**lowerCAmelCase__ ) if audio is not None: UpperCAmelCase = self.feature_extractor( lowerCAmelCase__ ,*lowerCAmelCase__ ,sampling_rate=lowerCAmelCase__ ,mask_audio=lowerCAmelCase__ ,**lowerCAmelCase__ ) UpperCAmelCase = {} if audio is not None: output_dict.update(lowerCAmelCase__ ) if images is not None: output_dict.update(lowerCAmelCase__ ) if images_mixed_dict is not None: output_dict.update(lowerCAmelCase__ ) return output_dict @property def _UpperCamelCase ( self ): UpperCAmelCase = self.image_processor.model_input_names UpperCAmelCase = self.feature_extractor.model_input_names return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
341
"""simple docstring""" from __future__ import annotations from collections.abc import Generator import requests from bsa import BeautifulSoup __magic_name__ = "https://www.indeed.co.in/jobs?q=mobile+app+development&l=" def _lowerCAmelCase ( UpperCamelCase_ = "mumbai" ): __SCREAMING_SNAKE_CASE = BeautifulSoup(requests.get(url + location ).content , """html.parser""" ) # This attribute finds out all the specifics listed in a job for job in soup.find_all("""div""" , attrs={"""data-tn-component""": """organicJob"""} ): __SCREAMING_SNAKE_CASE = job.find("""a""" , attrs={"""data-tn-element""": """jobTitle"""} ).text.strip() __SCREAMING_SNAKE_CASE = job.find("""span""" , {"""class""": """company"""} ).text.strip() yield job_title, company_name if __name__ == "__main__": for i, job in enumerate(fetch_jobs("Bangalore"), 1): print(F"""Job {i:>2} is {job[0]} at {job[1]}""")
155
0
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( HubertConfig, HubertForCTC, HubertModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() a : str = logging.get_logger(__name__) a : Any = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' for attribute in key.split("." ): UpperCAmelCase : Tuple = getattr(__magic_name__ , __magic_name__ ) if weight_type is not None: UpperCAmelCase : Dict = getattr(__magic_name__ , __magic_name__ ).shape else: UpperCAmelCase : Any = hf_pointer.shape assert hf_shape == value.shape, ( F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" F" {value.shape} for {full_name}" ) if weight_type == "weight": UpperCAmelCase : Optional[Any] = value elif weight_type == "weight_g": UpperCAmelCase : int = value elif weight_type == "weight_v": UpperCAmelCase : Dict = value elif weight_type == "bias": UpperCAmelCase : Any = value else: UpperCAmelCase : List[str] = value logger.info(F"{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}." ) def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : int = [] UpperCAmelCase : Optional[Any] = fairseq_model.state_dict() UpperCAmelCase : List[str] = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): UpperCAmelCase : List[str] = False if "conv_layers" in name: load_conv_layer( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , hf_model.config.feat_extract_norm == "group" , ) UpperCAmelCase : List[Any] = True else: for key, mapped_key in MAPPING.items(): UpperCAmelCase : Optional[Any] = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key if key in name or (key.split("w2v_model." )[-1] == name.split("." )[0] and not is_finetuned): UpperCAmelCase : Optional[int] = True if "*" in mapped_key: UpperCAmelCase : Union[str, Any] = name.split(__magic_name__ )[0].split("." )[-2] UpperCAmelCase : Dict = mapped_key.replace("*" , __magic_name__ ) if "weight_g" in name: UpperCAmelCase : Any = "weight_g" elif "weight_v" in name: UpperCAmelCase : Union[str, Any] = "weight_v" elif "weight" in name: UpperCAmelCase : List[str] = "weight" elif "bias" in name: UpperCAmelCase : Optional[Any] = "bias" else: UpperCAmelCase : str = None set_recursively(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) continue if not is_used: unused_weights.append(__magic_name__ ) logger.warning(F"Unused weights: {unused_weights}" ) def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : List[str] = full_name.split("conv_layers." )[-1] UpperCAmelCase : str = name.split("." 
) UpperCAmelCase : List[str] = int(items[0] ) UpperCAmelCase : Optional[int] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) UpperCAmelCase : int = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) UpperCAmelCase : List[str] = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was" " found." ) UpperCAmelCase : Dict = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." ) UpperCAmelCase : str = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." 
) else: unused_weights.append(__magic_name__ ) @torch.no_grad() def lowercase ( __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__=True ): '''simple docstring''' if config_path is not None: UpperCAmelCase : List[str] = HubertConfig.from_pretrained(__magic_name__ ) else: UpperCAmelCase : Tuple = HubertConfig() if is_finetuned: if dict_path: UpperCAmelCase : Dict = Dictionary.load(__magic_name__ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq UpperCAmelCase : List[str] = target_dict.pad_index UpperCAmelCase : List[str] = target_dict.bos_index UpperCAmelCase : List[Any] = target_dict.eos_index UpperCAmelCase : List[str] = len(target_dict.symbols ) UpperCAmelCase : Optional[Any] = os.path.join(__magic_name__ , "vocab.json" ) if not os.path.isdir(__magic_name__ ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(__magic_name__ ) ) return os.makedirs(__magic_name__ , exist_ok=__magic_name__ ) with open(__magic_name__ , "w" , encoding="utf-8" ) as vocab_handle: json.dump(target_dict.indices , __magic_name__ ) UpperCAmelCase : Optional[Any] = WavaVecaCTCTokenizer( __magic_name__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=__magic_name__ , ) UpperCAmelCase : List[Any] = True if config.feat_extract_norm == "layer" else False UpperCAmelCase : List[str] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__magic_name__ , return_attention_mask=__magic_name__ , ) UpperCAmelCase : Optional[int] = WavaVecaProcessor(feature_extractor=__magic_name__ , tokenizer=__magic_name__ ) processor.save_pretrained(__magic_name__ ) UpperCAmelCase : List[str] = HubertForCTC(__magic_name__ ) else: UpperCAmelCase : Union[str, Any] = HubertModel(__magic_name__ ) if is_finetuned: UpperCAmelCase , UpperCAmelCase 
, UpperCAmelCase : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) else: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) UpperCAmelCase : Any = model[0].eval() recursively_load_weights(__magic_name__ , __magic_name__ , __magic_name__ ) hf_wavavec.save_pretrained(__magic_name__ ) if __name__ == "__main__": a : Tuple = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) a : Union[str, Any] = parser.parse_args() convert_hubert_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
609
'''simple docstring''' import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case , snake_case=1_3 , snake_case=7 , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=False , snake_case=False , snake_case=False , snake_case=2 , snake_case=9_9 , snake_case=0 , snake_case=3_2 , snake_case=5 , snake_case=4 , snake_case=0.1 , snake_case=0.1 , snake_case=5_1_2 , snake_case=2 , snake_case=0.02 , snake_case=2 , snake_case=4 , snake_case="last" , snake_case=True , snake_case=None , snake_case=0 , ): '''simple docstring''' UpperCAmelCase : str = parent UpperCAmelCase : str = batch_size UpperCAmelCase : Union[str, Any] = seq_length UpperCAmelCase : int = is_training UpperCAmelCase : Any = use_input_lengths UpperCAmelCase : str = use_token_type_ids UpperCAmelCase : List[str] = use_labels UpperCAmelCase : Any = gelu_activation UpperCAmelCase : str = sinusoidal_embeddings UpperCAmelCase : List[Any] = causal UpperCAmelCase : Union[str, Any] = asm UpperCAmelCase : List[str] = n_langs UpperCAmelCase : Optional[int] = vocab_size UpperCAmelCase : str = n_special UpperCAmelCase : str = hidden_size UpperCAmelCase : Union[str, Any] = num_hidden_layers UpperCAmelCase : Dict = num_attention_heads UpperCAmelCase : 
List[Any] = hidden_dropout_prob UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob UpperCAmelCase : str = max_position_embeddings UpperCAmelCase : Optional[int] = type_sequence_label_size UpperCAmelCase : Optional[int] = initializer_range UpperCAmelCase : Union[str, Any] = num_labels UpperCAmelCase : Union[str, Any] = num_choices UpperCAmelCase : Dict = summary_type UpperCAmelCase : Dict = use_proj UpperCAmelCase : List[Any] = scope UpperCAmelCase : Optional[int] = bos_token_id def A_ ( self ): '''simple docstring''' UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase : Optional[Any] = None if self.use_input_lengths: UpperCAmelCase : Dict = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length UpperCAmelCase : Union[str, Any] = None if self.use_token_type_ids: UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) UpperCAmelCase : Any = None UpperCAmelCase : Optional[Any] = None UpperCAmelCase : Tuple = None if self.use_labels: UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase : Dict = ids_tensor([self.batch_size] , 2 ).float() UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase : List[str] = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def A_ ( self ): '''simple docstring''' return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , 
gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ): '''simple docstring''' UpperCAmelCase : Any = XLMModel(config=snake_case ) model.to(snake_case ) model.eval() UpperCAmelCase : Any = model(snake_case , lengths=snake_case , langs=snake_case ) UpperCAmelCase : Any = model(snake_case , langs=snake_case ) UpperCAmelCase : Union[str, Any] = model(snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ): '''simple docstring''' UpperCAmelCase : int = XLMWithLMHeadModel(snake_case ) model.to(snake_case ) model.eval() UpperCAmelCase : Tuple = model(snake_case , token_type_ids=snake_case , labels=snake_case ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ): '''simple docstring''' UpperCAmelCase : Optional[int] = XLMForQuestionAnsweringSimple(snake_case ) model.to(snake_case ) model.eval() UpperCAmelCase : List[str] = model(snake_case ) UpperCAmelCase : List[str] = model(snake_case , start_positions=snake_case , end_positions=snake_case ) UpperCAmelCase : List[str] = outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) 
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = XLMForQuestionAnswering(snake_case ) model.to(snake_case ) model.eval() UpperCAmelCase : Union[str, Any] = model(snake_case ) UpperCAmelCase : List[str] = model( snake_case , start_positions=snake_case , end_positions=snake_case , cls_index=snake_case , is_impossible=snake_case , p_mask=snake_case , ) UpperCAmelCase : Optional[Any] = model( snake_case , start_positions=snake_case , end_positions=snake_case , cls_index=snake_case , is_impossible=snake_case , ) ((UpperCAmelCase) , ) : str = result_with_labels.to_tuple() UpperCAmelCase : List[str] = model(snake_case , start_positions=snake_case , end_positions=snake_case ) ((UpperCAmelCase) , ) : str = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ): '''simple docstring''' UpperCAmelCase : Any = XLMForSequenceClassification(snake_case ) model.to(snake_case ) model.eval() UpperCAmelCase : Optional[int] = model(snake_case ) UpperCAmelCase : int = model(snake_case , labels=snake_case ) self.parent.assertEqual(result.loss.shape , () ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = self.num_labels UpperCAmelCase : Optional[int] = XLMForTokenClassification(snake_case ) model.to(snake_case ) model.eval() UpperCAmelCase : List[Any] = model(snake_case , attention_mask=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = self.num_choices UpperCAmelCase : Tuple = XLMForMultipleChoice(config=snake_case ) model.to(snake_case ) model.eval() UpperCAmelCase : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase : Tuple = model( snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : str = self.prepare_config_and_inputs() ( ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ) : Union[str, Any] = config_and_inputs UpperCAmelCase : List[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths} return config, inputs_dict @require_torch class UpperCamelCase__ ( lowercase__ , lowercase__ , 
lowercase__ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE__ : str = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable SCREAMING_SNAKE_CASE__ : Tuple = ( { "feature-extraction": XLMModel, "fill-mask": XLMWithLMHeadModel, "question-answering": XLMForQuestionAnsweringSimple, "text-classification": XLMForSequenceClassification, "text-generation": XLMWithLMHeadModel, "token-classification": XLMForTokenClassification, "zero-shot": XLMForSequenceClassification, } if is_torch_available() else {} ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case ): '''simple docstring''' if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def A_ ( self , snake_case , snake_case , snake_case=False ): '''simple docstring''' UpperCAmelCase : Any = super()._prepare_for_class(snake_case , snake_case , return_labels=snake_case ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": UpperCAmelCase : int = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=snake_case ) UpperCAmelCase : List[str] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=snake_case ) return inputs_dict def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = XLMModelTester(self ) UpperCAmelCase : str = ConfigTester(self , config_class=snake_case , emb_dim=3_7 ) def A_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() def A_ ( self ): '''simple docstring''' UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*snake_case ) def A_ ( 
self ): '''simple docstring''' UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*snake_case ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case=False , snake_case=1 ): '''simple docstring''' self.assertIsInstance(snake_case , snake_case ) self.assertListEqual( [isinstance(snake_case , snake_case ) for iter_attentions in attentions] , [True] * len(snake_case ) ) self.assertEqual(len(snake_case ) , (max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(snake_case ): # adds PAD dummy token UpperCAmelCase : str = min_length + idx + 1 UpperCAmelCase : List[Any] = min_length + idx + 1 UpperCAmelCase : List[Any] = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(snake_case ) ) def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case=False , snake_case=1 ): '''simple docstring''' self.assertIsInstance(snake_case , snake_case ) self.assertListEqual( [isinstance(snake_case , snake_case ) for iter_hidden_states in hidden_states] , [True] * len(snake_case ) , ) self.assertEqual(len(snake_case ) , (max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(snake_case ): # adds PAD dummy token UpperCAmelCase : List[Any] = min_length + idx + 1 UpperCAmelCase : Dict = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(snake_case ) , ) pass @slow def A_ ( self ): '''simple docstring''' for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase : Tuple = XLMModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) @require_torch class 
UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : int = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048" ) model.to(snake_case ) UpperCAmelCase : Optional[int] = torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=snake_case ) # the president UpperCAmelCase : Tuple = [ 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, 1_4, 4_4_7, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference UpperCAmelCase : Dict = model.generate(snake_case , do_sample=snake_case ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , snake_case )
609
1
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( HubertConfig, HubertForCTC, HubertModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() snake_case__ : int = logging.get_logger(__name__) snake_case__ : Optional[int] = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } def _snake_case (__lowercase , __lowercase , __lowercase , __lowercase , __lowercase): for attribute in key.split('.'): UpperCamelCase_ = getattr(__lowercase , __lowercase) if weight_type is not None: UpperCamelCase_ = getattr(__lowercase , __lowercase).shape else: UpperCamelCase_ = hf_pointer.shape assert hf_shape == value.shape, ( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": UpperCamelCase_ = value elif weight_type == "weight_g": UpperCamelCase_ = value elif weight_type == "weight_v": UpperCamelCase_ = value elif weight_type == "bias": UpperCamelCase_ = value else: UpperCamelCase_ = value logger.info(f"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""") def _snake_case (__lowercase , __lowercase , __lowercase): UpperCamelCase_ = [] UpperCamelCase_ = fairseq_model.state_dict() UpperCamelCase_ = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): UpperCamelCase_ = False if "conv_layers" in name: load_conv_layer( __lowercase , __lowercase , __lowercase , __lowercase , hf_model.config.feat_extract_norm == 'group' , ) UpperCamelCase_ = True else: for key, mapped_key in MAPPING.items(): UpperCamelCase_ = 'hubert.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key if key in name or (key.split('w2v_model.')[-1] == name.split('.')[0] and not is_finetuned): UpperCamelCase_ = True if "*" in mapped_key: UpperCamelCase_ = name.split(__lowercase)[0].split('.')[-2] UpperCamelCase_ = mapped_key.replace('*' , __lowercase) if "weight_g" in name: UpperCamelCase_ = 'weight_g' elif "weight_v" in name: UpperCamelCase_ = 'weight_v' elif "weight" in name: UpperCamelCase_ = 'weight' elif "bias" in name: UpperCamelCase_ = 'bias' else: UpperCamelCase_ = None set_recursively(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase) continue if not is_used: unused_weights.append(__lowercase) logger.warning(f"""Unused weights: {unused_weights}""") def _snake_case (__lowercase , __lowercase , __lowercase , __lowercase , __lowercase): UpperCamelCase_ = full_name.split('conv_layers.')[-1] UpperCamelCase_ = name.split('.') UpperCamelCase_ = int(items[0]) UpperCamelCase_ = int(items[1]) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) UpperCamelCase_ = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""") elif 
"weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) UpperCamelCase_ = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""") elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." ) UpperCamelCase_ = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""") elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) UpperCamelCase_ = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""") else: unused_weights.append(__lowercase) @torch.no_grad() def _snake_case (__lowercase , __lowercase , __lowercase=None , __lowercase=None , __lowercase=True): if config_path is not None: UpperCamelCase_ = HubertConfig.from_pretrained(__lowercase) else: UpperCamelCase_ = HubertConfig() if is_finetuned: if dict_path: UpperCamelCase_ = Dictionary.load(__lowercase) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq UpperCamelCase_ = target_dict.pad_index UpperCamelCase_ = target_dict.bos_index UpperCamelCase_ = target_dict.eos_index UpperCamelCase_ = len(target_dict.symbols) UpperCamelCase_ = os.path.join(__lowercase , 'vocab.json') if not os.path.isdir(__lowercase): logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(__lowercase)) return 
os.makedirs(__lowercase , exist_ok=__lowercase) with open(__lowercase , 'w' , encoding='utf-8') as vocab_handle: json.dump(target_dict.indices , __lowercase) UpperCamelCase_ = WavaVecaCTCTokenizer( __lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=__lowercase , ) UpperCamelCase_ = True if config.feat_extract_norm == 'layer' else False UpperCamelCase_ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__lowercase , return_attention_mask=__lowercase , ) UpperCamelCase_ = WavaVecaProcessor(feature_extractor=__lowercase , tokenizer=__lowercase) processor.save_pretrained(__lowercase) UpperCamelCase_ = HubertForCTC(__lowercase) else: UpperCamelCase_ = HubertModel(__lowercase) if is_finetuned: UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])}) else: UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path]) UpperCamelCase_ = model[0].eval() recursively_load_weights(__lowercase , __lowercase , __lowercase) hf_wavavec.save_pretrained(__lowercase) if __name__ == "__main__": snake_case__ : str = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned 
model or not""" ) snake_case__ : Optional[int] = parser.parse_args() convert_hubert_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
23
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_convbert import ConvBertTokenizer lowercase__ :Optional[int] = logging.get_logger(__name__) lowercase__ :Union[str, Any] = {'vocab_file': 'vocab.txt'} lowercase__ :int = { 'vocab_file': { 'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt', 'YituTech/conv-bert-medium-small': ( 'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt' ), 'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt', } } lowercase__ :Dict = { 'YituTech/conv-bert-base': 5_1_2, 'YituTech/conv-bert-medium-small': 5_1_2, 'YituTech/conv-bert-small': 5_1_2, } lowercase__ :List[str] = { 'YituTech/conv-bert-base': {'do_lower_case': True}, 'YituTech/conv-bert-medium-small': {'do_lower_case': True}, 'YituTech/conv-bert-small': {'do_lower_case': True}, } class snake_case ( __UpperCAmelCase ): '''simple docstring''' _A : Union[str, Any] = VOCAB_FILES_NAMES _A : int = PRETRAINED_VOCAB_FILES_MAP _A : str = PRETRAINED_INIT_CONFIGURATION _A : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _A : List[Any] = ConvBertTokenizer def __init__( self : int , __lowercase : List[Any]=None , __lowercase : int=None , __lowercase : Any=True , __lowercase : Dict="[UNK]" , __lowercase : Dict="[SEP]" , __lowercase : Dict="[PAD]" , __lowercase : int="[CLS]" , __lowercase : int="[MASK]" , __lowercase : List[str]=True , __lowercase : Optional[int]=None , **__lowercase : Any , ): '''simple docstring''' super().__init__( __lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , tokenize_chinese_chars=__lowercase , strip_accents=__lowercase , **__lowercase 
, ) __UpperCAmelCase : str = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , __lowercase ) != do_lower_case or normalizer_state.get('''strip_accents''' , __lowercase ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , __lowercase ) != tokenize_chinese_chars ): __UpperCAmelCase : Optional[Any] = getattr(__lowercase , normalizer_state.pop('''type''' ) ) __UpperCAmelCase : Any = do_lower_case __UpperCAmelCase : int = strip_accents __UpperCAmelCase : List[str] = tokenize_chinese_chars __UpperCAmelCase : Optional[Any] = normalizer_class(**__lowercase ) __UpperCAmelCase : Any = do_lower_case def A_ ( self : Optional[int] , __lowercase : Optional[int] , __lowercase : Dict=None ): '''simple docstring''' __UpperCAmelCase : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def A_ ( self : Union[str, Any] , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ): '''simple docstring''' __UpperCAmelCase : List[Any] = [self.sep_token_id] __UpperCAmelCase : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def A_ ( self : Optional[int] , __lowercase : str , __lowercase : Optional[str] = None ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = self._tokenizer.model.save(__lowercase , name=__lowercase ) return tuple(__lowercase )
522
0
from __future__ import annotations from typing import Dict from ...configuration_utils import PretrainedConfig SCREAMING_SNAKE_CASE__ : int = { """susnato/ernie-m-base_pytorch""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json""", """susnato/ernie-m-large_pytorch""": """https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json""", } class __lowerCAmelCase ( _UpperCamelCase ): _UpperCamelCase : Union[str, Any] = """ernie_m""" _UpperCamelCase : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"} def __init__( self , snake_case = 250_002 , snake_case = 768 , snake_case = 12 , snake_case = 12 , snake_case = 3_072 , snake_case = "gelu" , snake_case = 0.1 , snake_case = 0.1 , snake_case = 514 , snake_case = 0.02 , snake_case = 1 , snake_case = 1E-05 , snake_case=None , snake_case=False , snake_case=0.0 , **snake_case , ) -> Optional[int]: """simple docstring""" super().__init__(pad_token_id=snake_case , **snake_case ) a__ : Optional[int] = vocab_size a__ : List[str] = hidden_size a__ : Optional[int] = num_hidden_layers a__ : str = num_attention_heads a__ : Dict = intermediate_size a__ : Tuple = hidden_act a__ : Tuple = hidden_dropout_prob a__ : Union[str, Any] = attention_probs_dropout_prob a__ : Dict = max_position_embeddings a__ : Tuple = initializer_range a__ : int = layer_norm_eps a__ : str = classifier_dropout a__ : List[Any] = is_decoder a__ : Optional[int] = act_dropout
708
"""Convert an OpenAI Whisper checkpoint to the HuggingFace Transformers format.

Reconstructed from an obfuscated dump in which all five functions were named
``_A`` (each def shadowing the previous) while the converter called
``_download`` / ``remove_ignore_keys_`` / ``rename_keys`` /
``make_linear_from_emb`` — names that were never defined.
"""
import argparse
import hashlib
import io
import os
import urllib
import warnings

import torch
from torch import nn
from tqdm import tqdm

from transformers import WhisperConfig, WhisperForConditionalGeneration


# Official OpenAI checkpoint URLs. The SHA256 of each file is embedded in the
# URL (second-to-last path component) and is used to validate downloads.
_MODELS = {
    "tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
    "tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
    "base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
    "base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
    "small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
    "small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
    "medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
    "medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
    "large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
    "large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}


def remove_ignore_keys_(state_dict):
    """Drop (in place) top-level bookkeeping entries with no HF equivalent."""
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)


# Substring renames applied to every key of the original state dict.
WHISPER_MAPPING = {
    "blocks": "layers",
    "mlp.0": "fc1",
    "mlp.2": "fc2",
    "mlp_ln": "final_layer_norm",
    ".attn.query": ".self_attn.q_proj",
    ".attn.key": ".self_attn.k_proj",
    ".attn.value": ".self_attn.v_proj",
    ".attn_ln": ".self_attn_layer_norm",
    ".attn.out": ".self_attn.out_proj",
    ".cross_attn.query": ".encoder_attn.q_proj",
    ".cross_attn.key": ".encoder_attn.k_proj",
    ".cross_attn.value": ".encoder_attn.v_proj",
    ".cross_attn_ln": ".encoder_attn_layer_norm",
    ".cross_attn.out": ".encoder_attn.out_proj",
    "decoder.ln.": "decoder.layer_norm.",
    "encoder.ln.": "encoder.layer_norm.",
    "token_embedding": "embed_tokens",
    "encoder.positional_embedding": "encoder.embed_positions.weight",
    "decoder.positional_embedding": "decoder.embed_positions.weight",
    "ln_post": "layer_norm",
}


def rename_keys(s_dict):
    """Rename all keys of `s_dict` (in place) from OpenAI to HF naming; return it."""
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)
    return s_dict


def make_linear_from_emb(emb):
    """Build a bias-free Linear layer sharing weights with `emb` (weight tying)."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def _download(url, root=None):
    """Download `url` into `root` (default: ~/.cache/whisper), verify its SHA256
    against the checksum embedded in the URL, and return the checkpoint loaded
    with `torch.load` (CPU map location)."""
    if root is None:
        root = os.path.join(os.path.expanduser("~"), ".cache", "whisper")
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)
    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")
    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return torch.load(io.BytesIO(model_bytes), map_location="cpu")
        warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))
    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model."
        )
    return torch.load(io.BytesIO(model_bytes), map_location="cpu")


def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    """Convert an OpenAI checkpoint (model name or local ``.pt`` path) and save a
    HF `WhisperForConditionalGeneration` under `pytorch_dump_folder_path`."""
    if ".pt" not in checkpoint_path:
        original_checkpoint = _download(_MODELS[checkpoint_path])
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    # Keep the token-embedding weights: they become the LM head if the
    # embeddings are not tied.
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        # Fixed: the dump read "n_text_state" (hidden size) here instead of the
        # decoder head count.
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to the downloaded checkpoints")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()

    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
629
0
"""Slow (pure-Python) byte-level BPE tokenizer for BART.

Reconstructed from an obfuscated dump in which both module-level helpers were
named ``__lowerCAmelCase`` (the second shadowed the first) and every method
signature duplicated a single mangled parameter name (a SyntaxError).
"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1_024,
    "facebook/bart-large": 1_024,
    "facebook/bart-large-mnli": 1_024,
    "facebook/bart-large-cnn": 1_024,
    "facebook/bart-large-xsum": 1_024,
    "yjernite/bart_eli5": 1_024,
}


@lru_cache()
def bytes_to_unicode():
    """Return a byte -> printable-unicode-character map covering all 256 bytes.

    Printable bytes map to themselves; the rest are shifted above 2**8 so every
    byte has a visible, reversible representation for byte-level BPE.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(c) for c in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word` (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class BartTokenizer(PreTrainedTokenizer):
    """GPT-2-style byte-level BPE tokenizer used by BART."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for
        # capitalized versions of contractions.
        self.pat = re.compile(r"\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair-encoding merges to a single pre-tokenized token."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked (most frequent) pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split `text` with the GPT-2 pattern, byte-encode, then BPE each piece."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Invert the byte encoding and decode UTF-8 (per `self.errors`)."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write vocab.json and merges.txt into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """<s> A </s> for one sequence; <s> A </s></s> B </s> for a pair."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        """Return 1 for special tokens, 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """BART does not use token type ids: return all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prepend a space so byte-level BPE treats the first word like the rest."""
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
226
"""simple docstring""" import inspect import unittest from transformers import MobileNetVaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetVaForImageClassification, MobileNetVaModel from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): def __UpperCAmelCase ( self : List[Any] ): """simple docstring""" _snake_case = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__lowerCamelCase , '''tf_padding''' ) ) self.parent.assertTrue(hasattr(__lowerCamelCase , '''depth_multiplier''' ) ) class UpperCAmelCase : def __init__( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple=1_3 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : List[str]=3_2 , __lowerCamelCase : Dict=0.2_5 , __lowerCamelCase : Optional[Any]=8 , __lowerCamelCase : Dict=True , __lowerCamelCase : List[Any]=1_0_2_4 , __lowerCamelCase : Union[str, Any]=3_2 , __lowerCamelCase : int="relu6" , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Tuple=0.0_2 , __lowerCamelCase : Tuple=True , __lowerCamelCase : Dict=True , __lowerCamelCase : Union[str, Any]=1_0 , __lowerCamelCase : Optional[Any]=None , ): """simple docstring""" _snake_case = parent _snake_case = batch_size _snake_case = num_channels _snake_case = image_size _snake_case = depth_multiplier _snake_case = min_depth _snake_case = tf_padding _snake_case = int(last_hidden_size * depth_multiplier ) _snake_case = 
output_stride _snake_case = hidden_act _snake_case = classifier_dropout_prob _snake_case = use_labels _snake_case = is_training _snake_case = num_labels _snake_case = initializer_range _snake_case = scope def __UpperCAmelCase ( self : List[Any] ): """simple docstring""" _snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case = None _snake_case = None if self.use_labels: _snake_case = ids_tensor([self.batch_size] , self.num_labels ) _snake_case = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) _snake_case = self.get_config() return config, pixel_values, labels, pixel_labels def __UpperCAmelCase ( self : Dict ): """simple docstring""" return MobileNetVaConfig( num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def __UpperCAmelCase ( self : Dict , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str] ): """simple docstring""" _snake_case = MobileNetVaModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() _snake_case = model(__lowerCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def __UpperCAmelCase ( self : str , __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] ): """simple docstring""" _snake_case = self.num_labels _snake_case = MobileNetVaForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() _snake_case = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , 
(self.batch_size, self.num_labels) ) def __UpperCAmelCase ( self : Dict ): """simple docstring""" _snake_case = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case , _snake_case = config_and_inputs _snake_case = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase ( __SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,unittest.TestCase ): A__ : List[Any] = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else () A__ : Optional[int] = ( {'''feature-extraction''': MobileNetVaModel, '''image-classification''': MobileNetVaForImageClassification} if is_torch_available() else {} ) A__ : List[str] = False A__ : Tuple = False A__ : List[Any] = False A__ : List[str] = False def __UpperCAmelCase ( self : Dict ): """simple docstring""" _snake_case = MobileNetVaModelTester(self ) _snake_case = MobileNetVaConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase ) def __UpperCAmelCase ( self : Dict ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='''MobileNetV1 does not use inputs_embeds''' ) def __UpperCAmelCase ( self : int ): """simple docstring""" pass @unittest.skip(reason='''MobileNetV1 does not support input and output embeddings''' ) def __UpperCAmelCase ( self : Union[str, Any] ): """simple docstring""" pass @unittest.skip(reason='''MobileNetV1 does not output attentions''' ) def __UpperCAmelCase ( self : Any ): """simple docstring""" pass def __UpperCAmelCase ( self : int ): """simple docstring""" _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = model_class(__lowerCamelCase ) _snake_case = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case = [*signature.parameters.keys()] _snake_case = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , 
__lowerCamelCase ) def __UpperCAmelCase ( self : Union[str, Any] ): """simple docstring""" _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def __UpperCAmelCase ( self : Dict ): """simple docstring""" def check_hidden_states_output(__lowerCamelCase : Any , __lowerCamelCase : int , __lowerCamelCase : Union[str, Any] ): _snake_case = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): _snake_case = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) _snake_case = outputs.hidden_states _snake_case = 2_6 self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase ) _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _snake_case = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) def __UpperCAmelCase ( self : List[str] ): """simple docstring""" _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) @slow def __UpperCAmelCase ( self : List[str] ): """simple docstring""" for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case = MobileNetVaModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def snake_case ( ) -> int: _snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class UpperCAmelCase ( unittest.TestCase ): @cached_property def __UpperCAmelCase ( self : List[str] ): """simple docstring""" return ( MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v1_1.0_224''' ) if is_vision_available() else None ) @slow def 
__UpperCAmelCase ( self : Optional[int] ): """simple docstring""" _snake_case = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''' ).to(__lowerCamelCase ) _snake_case = self.default_image_processor _snake_case = prepare_img() _snake_case = image_processor(images=__lowerCamelCase , return_tensors='''pt''' ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): _snake_case = model(**__lowerCamelCase ) # verify the logits _snake_case = torch.Size((1, 1_0_0_1) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) _snake_case = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1E-4 ) )
103
0
"""Distributed RAG retriever built on torch.distributed (gloo process group).

Reconstructed from an obfuscated dump: method signatures duplicated one mangled
parameter name (a SyntaxError) and the ``GLOO_SOCKET_IFNAME`` / ``MASTER_PORT``
environment assignments had been collapsed into dead local assignments.
"""
import logging
import os
from typing import List, Tuple

import numpy as np
import psutil
import torch
import torch.distributed as dist

from transformers import RagRetriever


logger = logging.getLogger(__name__)


class RagPyTorchDistributedRetriever(RagRetriever):
    """Retriever where only the rank-0 worker loads the index; other workers
    send their query hidden states to rank 0 via gather and receive results
    via scatter over a dedicated gloo process group."""

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        # Created lazily in init_retrieval(); stays None in single-GPU mode.
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        """Initialize the gloo group and, on the main worker only, the index."""
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait until the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        """Receive this rank's shard of rank-0's results via dist.scatter."""
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        """Retrieve `n_docs` documents per query, gathering queries to rank 0
        and scattering results back when training is distributed."""
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
719
import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __snake_case : Any = logging.get_logger(__name__) __snake_case : Tuple = {"""tokenizer_file""": """tokenizer.json"""} __snake_case : str = { """tokenizer_file""": { """bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""", """bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""", """bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""", """bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""", """bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""", """bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""", """bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""", }, } class __SCREAMING_SNAKE_CASE ( __lowercase): _SCREAMING_SNAKE_CASE : Dict = VOCAB_FILES_NAMES _SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP _SCREAMING_SNAKE_CASE : Union[str, Any] = ['''input_ids''', '''attention_mask'''] _SCREAMING_SNAKE_CASE : Union[str, Any] = None def __init__( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase="<unk>" , _UpperCamelCase="<s>" , _UpperCamelCase="</s>" , _UpperCamelCase="<pad>" , _UpperCamelCase=False , _UpperCamelCase=False , **_UpperCamelCase , ): """simple docstring""" super().__init__( _UpperCamelCase , _UpperCamelCase , tokenizer_file=_UpperCamelCase , unk_token=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , pad_token=_UpperCamelCase , 
add_prefix_space=_UpperCamelCase , clean_up_tokenization_spaces=_UpperCamelCase , **_UpperCamelCase , ) lowerCAmelCase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , _UpperCamelCase ) != add_prefix_space: lowerCAmelCase__ = getattr(_UpperCamelCase , pre_tok_state.pop('type' ) ) lowerCAmelCase__ = add_prefix_space lowerCAmelCase__ = pre_tok_class(**_UpperCamelCase ) lowerCAmelCase__ = add_prefix_space def UpperCamelCase__ ( self , *_UpperCamelCase , **_UpperCamelCase ): """simple docstring""" lowerCAmelCase__ = kwargs.get('is_split_into_words' , _UpperCamelCase ) if not (self.add_prefix_space or not is_split_into_words): raise Exception( F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with" ' pretokenized inputs.' ) return super()._batch_encode_plus(*_UpperCamelCase , **_UpperCamelCase ) def UpperCamelCase__ ( self , *_UpperCamelCase , **_UpperCamelCase ): """simple docstring""" lowerCAmelCase__ = kwargs.get('is_split_into_words' , _UpperCamelCase ) if not (self.add_prefix_space or not is_split_into_words): raise Exception( F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with" ' pretokenized inputs.' ) return super()._encode_plus(*_UpperCamelCase , **_UpperCamelCase ) def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase = None ): """simple docstring""" lowerCAmelCase__ = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase ) return tuple(_UpperCamelCase ) def UpperCamelCase__ ( self , _UpperCamelCase ): """simple docstring""" lowerCAmelCase__ = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase ) + [self.eos_token_id] ) if len(_UpperCamelCase ) > self.model_max_length: lowerCAmelCase__ = input_ids[-self.model_max_length :] return input_ids
365
0
from collections import defaultdict def __lowerCamelCase ( __lowerCAmelCase : str , __lowerCAmelCase : str ) -> str: __UpperCamelCase : Optional[Any] = first_str.lower().strip() __UpperCamelCase : Union[str, Any] = second_str.lower().strip() # Remove whitespace __UpperCamelCase : Dict = first_str.replace(""" """ , """""" ) __UpperCamelCase : Dict = second_str.replace(""" """ , """""" ) # Strings of different lengths are not anagrams if len(a_ ) != len(a_ ): return False # Default values for count should be 0 __UpperCamelCase : defaultdict[str, int] = defaultdict(a_ ) # For each character in input strings, # increment count in the corresponding for i in range(len(a_ ) ): count[first_str[i]] += 1 count[second_str[i]] -= 1 return all(_count == 0 for _count in count.values() ) if __name__ == "__main__": from doctest import testmod testmod() UpperCamelCase = input('Enter the first string ').strip() UpperCamelCase = input('Enter the second string ').strip() UpperCamelCase = check_anagrams(input_a, input_b) print(F"""{input_a} and {input_b} are {'' if status else 'not '}anagrams.""")
269
'''simple docstring''' def __UpperCAmelCase ( ): _UpperCAmelCase : List[str] = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] _UpperCAmelCase : Optional[Any] = 6 _UpperCAmelCase : Union[str, Any] = 1 _UpperCAmelCase : Optional[int] = 1_901 _UpperCAmelCase : str = 0 while year < 2_001: day += 7 if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0): if day > days_per_month[month - 1] and month != 2: month += 1 _UpperCAmelCase : List[str] = day - days_per_month[month - 2] elif day > 29 and month == 2: month += 1 _UpperCAmelCase : Dict = day - 29 else: if day > days_per_month[month - 1]: month += 1 _UpperCAmelCase : int = day - days_per_month[month - 2] if month > 12: year += 1 _UpperCAmelCase : int = 1 if year < 2_001 and day == 1: sundays += 1 return sundays if __name__ == "__main__": print(solution())
494
0
import torch from diffusers import UnCLIPScheduler from .test_schedulers import SchedulerCommonTest class lowerCamelCase ( lowercase__ ): '''simple docstring''' lowerCAmelCase_ : int = (UnCLIPScheduler,) def A__ ( self , **lowerCAmelCase ): UpperCAmelCase_ = { "num_train_timesteps": 1000, "variance_type": "fixed_small_log", "clip_sample": True, "clip_sample_range": 1.0, "prediction_type": "epsilon", } config.update(**_UpperCAmelCase ) return config def A__ ( self ): for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=_UpperCAmelCase ) def A__ ( self ): for variance in ["fixed_small_log", "learned_range"]: self.check_over_configs(variance_type=_UpperCAmelCase ) def A__ ( self ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=_UpperCAmelCase ) def A__ ( self ): for clip_sample_range in [1, 5, 10, 20]: self.check_over_configs(clip_sample_range=_UpperCAmelCase ) def A__ ( self ): for prediction_type in ["epsilon", "sample"]: self.check_over_configs(prediction_type=_UpperCAmelCase ) def A__ ( self ): for time_step in [0, 500, 999]: for prev_timestep in [None, 5, 100, 250, 500, 750]: if prev_timestep is not None and prev_timestep >= time_step: continue self.check_over_forward(time_step=_UpperCAmelCase , prev_timestep=_UpperCAmelCase ) def A__ ( self ): UpperCAmelCase_ = self.scheduler_classes[0] UpperCAmelCase_ = self.get_scheduler_config(variance_type="fixed_small_log" ) UpperCAmelCase_ = scheduler_class(**_UpperCAmelCase ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_0_0_0e-1_0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0549625 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9994987 ) ) < 1e-5 def A__ ( self ): UpperCAmelCase_ = self.scheduler_classes[0] UpperCAmelCase_ = self.get_scheduler_config(variance_type="learned_range" ) UpperCAmelCase_ = scheduler_class(**_UpperCAmelCase ) UpperCAmelCase_ = 0.5 assert scheduler._get_variance(1 , 
predicted_variance=_UpperCAmelCase ) - -10.1712790 < 1e-5 assert scheduler._get_variance(487 , predicted_variance=_UpperCAmelCase ) - -5.7998052 < 1e-5 assert scheduler._get_variance(999 , predicted_variance=_UpperCAmelCase ) - -0.0010011 < 1e-5 def A__ ( self ): UpperCAmelCase_ = self.scheduler_classes[0] UpperCAmelCase_ = self.get_scheduler_config() UpperCAmelCase_ = scheduler_class(**_UpperCAmelCase ) UpperCAmelCase_ = scheduler.timesteps UpperCAmelCase_ = self.dummy_model() UpperCAmelCase_ = self.dummy_sample_deter UpperCAmelCase_ = torch.manual_seed(0 ) for i, t in enumerate(_UpperCAmelCase ): # 1. predict noise residual UpperCAmelCase_ = model(_UpperCAmelCase , _UpperCAmelCase ) # 2. predict previous mean of sample x_t-1 UpperCAmelCase_ = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample UpperCAmelCase_ = pred_prev_sample UpperCAmelCase_ = torch.sum(torch.abs(_UpperCAmelCase ) ) UpperCAmelCase_ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 252.2682495 ) < 1e-2 assert abs(result_mean.item() - 0.3284743 ) < 1e-3 def A__ ( self ): UpperCAmelCase_ = self.scheduler_classes[0] UpperCAmelCase_ = self.get_scheduler_config() UpperCAmelCase_ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(25 ) UpperCAmelCase_ = scheduler.timesteps UpperCAmelCase_ = self.dummy_model() UpperCAmelCase_ = self.dummy_sample_deter UpperCAmelCase_ = torch.manual_seed(0 ) for i, t in enumerate(_UpperCAmelCase ): # 1. predict noise residual UpperCAmelCase_ = model(_UpperCAmelCase , _UpperCAmelCase ) if i + 1 == timesteps.shape[0]: UpperCAmelCase_ = None else: UpperCAmelCase_ = timesteps[i + 1] # 2. 
predict previous mean of sample x_t-1 UpperCAmelCase_ = scheduler.step( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , prev_timestep=_UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample UpperCAmelCase_ = pred_prev_sample UpperCAmelCase_ = torch.sum(torch.abs(_UpperCAmelCase ) ) UpperCAmelCase_ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 258.2044983 ) < 1e-2 assert abs(result_mean.item() - 0.3362038 ) < 1e-3 def A__ ( self ): pass def A__ ( self ): pass
714
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) SCREAMING_SNAKE_CASE = { "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE = [ "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST", "GPTBigCodeForSequenceClassification", "GPTBigCodeForTokenClassification", "GPTBigCodeForCausalLM", "GPTBigCodeModel", "GPTBigCodePreTrainedModel", ] if TYPE_CHECKING: from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_bigcode import ( GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTBigCodeForCausalLM, GPTBigCodeForSequenceClassification, GPTBigCodeForTokenClassification, GPTBigCodeModel, GPTBigCodePreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
23
0
'''simple docstring''' def snake_case_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] ) -> Dict: # Return True if there is node that has not iterated. UpperCAmelCase : List[Any] = [False] * len(_lowerCAmelCase ) UpperCAmelCase : Tuple = [] queue.append(_lowerCAmelCase ) UpperCAmelCase : List[Any] = True while queue: UpperCAmelCase : str = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(_lowerCAmelCase ) UpperCAmelCase : Optional[Any] = True UpperCAmelCase : int = u return visited[t] def snake_case_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict ) -> List[str]: # This array is filled by BFS and to store path UpperCAmelCase : Tuple = [-1] * (len(_lowerCAmelCase )) UpperCAmelCase : Tuple = 0 while bfs(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): UpperCAmelCase : Optional[int] = float('''Inf''' ) UpperCAmelCase : Tuple = sink while s != source: # Find the minimum value in select path UpperCAmelCase : Tuple = min(_lowerCAmelCase , graph[parent[s]][s] ) UpperCAmelCase : Optional[Any] = parent[s] max_flow += path_flow UpperCAmelCase : Union[str, Any] = sink while v != source: UpperCAmelCase : List[Any] = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow UpperCAmelCase : Any = parent[v] return max_flow UpperCamelCase__: List[str] = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] UpperCamelCase__ , UpperCamelCase__: Optional[Any] = 0, 5 print(ford_fulkerson(graph, source, sink))
127
'''simple docstring''' from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase__: int = logging.get_logger(__name__) # TODO Update this UpperCamelCase__: Any = { "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json", # See all ESM models at https://huggingface.co/models?filter=esm } class SCREAMING_SNAKE_CASE( A__ ): """simple docstring""" lowerCamelCase__ = """esm""" def __init__( self : Union[str, Any] , __snake_case : Union[str, Any]=None , __snake_case : str=None , __snake_case : Tuple=None , __snake_case : Optional[int]=768 , __snake_case : List[Any]=12 , __snake_case : Dict=12 , __snake_case : str=3072 , __snake_case : Dict=0.1 , __snake_case : Optional[int]=0.1 , __snake_case : Dict=1026 , __snake_case : Union[str, Any]=0.02 , __snake_case : int=1E-12 , __snake_case : Optional[Any]="absolute" , __snake_case : List[str]=True , __snake_case : Dict=None , __snake_case : Tuple=False , __snake_case : List[Any]=False , __snake_case : Optional[int]=None , __snake_case : Optional[Any]=None , **__snake_case : List[Any] , ) -> List[Any]: super().__init__(pad_token_id=__snake_case , mask_token_id=__snake_case , **__snake_case ) UpperCAmelCase : str = vocab_size UpperCAmelCase : Optional[int] = hidden_size UpperCAmelCase : List[Any] = num_hidden_layers UpperCAmelCase : str = num_attention_heads UpperCAmelCase : Any = intermediate_size UpperCAmelCase : Tuple = hidden_dropout_prob UpperCAmelCase : Dict = attention_probs_dropout_prob UpperCAmelCase : Union[str, Any] = max_position_embeddings UpperCAmelCase : Tuple = initializer_range UpperCAmelCase : str = layer_norm_eps UpperCAmelCase : Union[str, Any] = position_embedding_type UpperCAmelCase : str = use_cache UpperCAmelCase : str = emb_layer_norm_before UpperCAmelCase : Union[str, Any] = token_dropout UpperCAmelCase : Union[str, Any] = is_folding_model if is_folding_model: if 
esmfold_config is None: logger.info('''No esmfold_config supplied for folding model, using default values.''' ) UpperCAmelCase : Dict = EsmFoldConfig() elif isinstance(__snake_case , __snake_case ): UpperCAmelCase : Optional[int] = EsmFoldConfig(**__snake_case ) UpperCAmelCase : Optional[int] = esmfold_config if vocab_list is None: logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' ) UpperCAmelCase : str = get_default_vocab_list() else: UpperCAmelCase : List[Any] = vocab_list else: UpperCAmelCase : int = None UpperCAmelCase : Optional[Any] = None if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , __snake_case ): raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' ) def A ( self : Optional[Any] ) -> Optional[int]: UpperCAmelCase : Dict = super().to_dict() if isinstance(self.esmfold_config , __snake_case ): UpperCAmelCase : List[Any] = self.esmfold_config.to_dict() return output @dataclass class SCREAMING_SNAKE_CASE: """simple docstring""" lowerCamelCase__ = None lowerCamelCase__ = True lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = 0 lowerCamelCase__ = True lowerCamelCase__ = False lowerCamelCase__ = 128 lowerCamelCase__ = None def A ( self : int ) -> str: if self.trunk is None: UpperCAmelCase : int = TrunkConfig() elif isinstance(self.trunk , __snake_case ): UpperCAmelCase : Tuple = TrunkConfig(**self.trunk ) def A ( self : Optional[Any] ) -> List[str]: UpperCAmelCase : Optional[int] = asdict(self ) UpperCAmelCase : List[Any] = self.trunk.to_dict() return output @dataclass class SCREAMING_SNAKE_CASE: """simple docstring""" lowerCamelCase__ = 48 lowerCamelCase__ = 1_024 lowerCamelCase__ = 128 lowerCamelCase__ = 32 lowerCamelCase__ = 32 lowerCamelCase__ = 32 lowerCamelCase__ = 0 lowerCamelCase__ = 0 lowerCamelCase__ = False lowerCamelCase__ = 4 lowerCamelCase__ = 128 lowerCamelCase__ = 
None def A ( self : Optional[int] ) -> Union[str, Any]: if self.structure_module is None: UpperCAmelCase : Optional[Any] = StructureModuleConfig() elif isinstance(self.structure_module , __snake_case ): UpperCAmelCase : Any = StructureModuleConfig(**self.structure_module ) if self.max_recycles <= 0: raise ValueError(F"""`max_recycles` should be positive, got {self.max_recycles}.""" ) if self.sequence_state_dim % self.sequence_state_dim != 0: raise ValueError( '''`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got''' F""" {self.sequence_state_dim} and {self.sequence_state_dim}.""" ) if self.pairwise_state_dim % self.pairwise_state_dim != 0: raise ValueError( '''`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got''' F""" {self.pairwise_state_dim} and {self.pairwise_state_dim}.""" ) UpperCAmelCase : List[str] = self.sequence_state_dim // self.sequence_head_width UpperCAmelCase : List[Any] = self.pairwise_state_dim // self.pairwise_head_width if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width: raise ValueError( '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got''' F""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" ) if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width: raise ValueError( '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got''' F""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" ) if self.pairwise_state_dim % 2 != 0: raise ValueError(F"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" ) if self.dropout >= 0.4: raise ValueError(F"""`dropout` should not be greater than 0.4, got {self.dropout}.""" ) def A ( self : List[Any] ) -> int: UpperCAmelCase : Union[str, Any] = asdict(self ) UpperCAmelCase : List[str] = self.structure_module.to_dict() return output @dataclass class SCREAMING_SNAKE_CASE: 
"""simple docstring""" lowerCamelCase__ = 384 lowerCamelCase__ = 128 lowerCamelCase__ = 16 lowerCamelCase__ = 128 lowerCamelCase__ = 12 lowerCamelCase__ = 4 lowerCamelCase__ = 8 lowerCamelCase__ = 0.1 lowerCamelCase__ = 8 lowerCamelCase__ = 1 lowerCamelCase__ = 2 lowerCamelCase__ = 7 lowerCamelCase__ = 10 lowerCamelCase__ = 1e-8 lowerCamelCase__ = 1e5 def A ( self : Tuple ) -> Any: return asdict(self ) def snake_case_ ( ) -> Optional[Any]: return ( "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>", )
127
1
import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all feature extractors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...feature_extraction_utils import FeatureExtractionMixin from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = OrderedDict( [ ('audio-spectrogram-transformer', 'ASTFeatureExtractor'), ('beit', 'BeitFeatureExtractor'), ('chinese_clip', 'ChineseCLIPFeatureExtractor'), ('clap', 'ClapFeatureExtractor'), ('clip', 'CLIPFeatureExtractor'), ('clipseg', 'ViTFeatureExtractor'), ('conditional_detr', 'ConditionalDetrFeatureExtractor'), ('convnext', 'ConvNextFeatureExtractor'), ('cvt', 'ConvNextFeatureExtractor'), ('data2vec-audio', 'Wav2Vec2FeatureExtractor'), ('data2vec-vision', 'BeitFeatureExtractor'), ('deformable_detr', 'DeformableDetrFeatureExtractor'), ('deit', 'DeiTFeatureExtractor'), ('detr', 'DetrFeatureExtractor'), ('dinat', 'ViTFeatureExtractor'), ('donut-swin', 'DonutFeatureExtractor'), ('dpt', 'DPTFeatureExtractor'), ('encodec', 'EncodecFeatureExtractor'), ('flava', 'FlavaFeatureExtractor'), ('glpn', 'GLPNFeatureExtractor'), ('groupvit', 'CLIPFeatureExtractor'), ('hubert', 'Wav2Vec2FeatureExtractor'), ('imagegpt', 'ImageGPTFeatureExtractor'), ('layoutlmv2', 'LayoutLMv2FeatureExtractor'), ('layoutlmv3', 'LayoutLMv3FeatureExtractor'), ('levit', 'LevitFeatureExtractor'), ('maskformer', 'MaskFormerFeatureExtractor'), ('mctct', 'MCTCTFeatureExtractor'), ('mobilenet_v1', 'MobileNetV1FeatureExtractor'), ('mobilenet_v2', 'MobileNetV2FeatureExtractor'), ('mobilevit', 'MobileViTFeatureExtractor'), 
('nat', 'ViTFeatureExtractor'), ('owlvit', 'OwlViTFeatureExtractor'), ('perceiver', 'PerceiverFeatureExtractor'), ('poolformer', 'PoolFormerFeatureExtractor'), ('regnet', 'ConvNextFeatureExtractor'), ('resnet', 'ConvNextFeatureExtractor'), ('segformer', 'SegformerFeatureExtractor'), ('sew', 'Wav2Vec2FeatureExtractor'), ('sew-d', 'Wav2Vec2FeatureExtractor'), ('speech_to_text', 'Speech2TextFeatureExtractor'), ('speecht5', 'SpeechT5FeatureExtractor'), ('swiftformer', 'ViTFeatureExtractor'), ('swin', 'ViTFeatureExtractor'), ('swinv2', 'ViTFeatureExtractor'), ('table-transformer', 'DetrFeatureExtractor'), ('timesformer', 'VideoMAEFeatureExtractor'), ('tvlt', 'TvltFeatureExtractor'), ('unispeech', 'Wav2Vec2FeatureExtractor'), ('unispeech-sat', 'Wav2Vec2FeatureExtractor'), ('van', 'ConvNextFeatureExtractor'), ('videomae', 'VideoMAEFeatureExtractor'), ('vilt', 'ViltFeatureExtractor'), ('vit', 'ViTFeatureExtractor'), ('vit_mae', 'ViTFeatureExtractor'), ('vit_msn', 'ViTFeatureExtractor'), ('wav2vec2', 'Wav2Vec2FeatureExtractor'), ('wav2vec2-conformer', 'Wav2Vec2FeatureExtractor'), ('wavlm', 'Wav2Vec2FeatureExtractor'), ('whisper', 'WhisperFeatureExtractor'), ('xclip', 'CLIPFeatureExtractor'), ('yolos', 'YolosFeatureExtractor'), ] ) lowerCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES) def snake_case ( UpperCAmelCase : str ): for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items(): if class_name in extractors: A = model_type_to_module_name(UpperCAmelCase ) A = importlib.import_module(f'.{module_name}', 'transformers.models' ) try: return getattr(UpperCAmelCase, UpperCAmelCase ) except AttributeError: continue for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items(): if getattr(UpperCAmelCase, '__name__', UpperCAmelCase ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. 
In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. A = importlib.import_module('transformers' ) if hasattr(UpperCAmelCase, UpperCAmelCase ): return getattr(UpperCAmelCase, UpperCAmelCase ) return None def snake_case ( UpperCAmelCase : Union[str, os.PathLike], UpperCAmelCase : Optional[Union[str, os.PathLike]] = None, UpperCAmelCase : bool = False, UpperCAmelCase : bool = False, UpperCAmelCase : Optional[Dict[str, str]] = None, UpperCAmelCase : Optional[Union[bool, str]] = None, UpperCAmelCase : Optional[str] = None, UpperCAmelCase : bool = False, **UpperCAmelCase : List[str], ): A = get_file_from_repo( UpperCAmelCase, UpperCAmelCase, cache_dir=UpperCAmelCase, force_download=UpperCAmelCase, resume_download=UpperCAmelCase, proxies=UpperCAmelCase, use_auth_token=UpperCAmelCase, revision=UpperCAmelCase, local_files_only=UpperCAmelCase, ) if resolved_config_file is None: logger.info( 'Could not locate the feature extractor configuration file, will try to use the model config instead.' ) return {} with open(UpperCAmelCase, encoding='utf-8' ) as reader: return json.load(UpperCAmelCase ) class UpperCamelCase : """simple docstring""" def __init__( self : List[Any] ) -> Tuple: '''simple docstring''' raise EnvironmentError( 'AutoFeatureExtractor is designed to be instantiated ' 'using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.' 
) @classmethod @replace_list_option_in_docstrings(_SCREAMING_SNAKE_CASE ) def A( cls : int ,_SCREAMING_SNAKE_CASE : Union[str, Any] ,**_SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' A = kwargs.pop('config' ,_SCREAMING_SNAKE_CASE ) A = kwargs.pop('trust_remote_code' ,_SCREAMING_SNAKE_CASE ) A = True A , A = FeatureExtractionMixin.get_feature_extractor_dict(_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) A = config_dict.get('feature_extractor_type' ,_SCREAMING_SNAKE_CASE ) A = None if "AutoFeatureExtractor" in config_dict.get('auto_map' ,{} ): A = config_dict['auto_map']['AutoFeatureExtractor'] # If we don't find the feature extractor class in the feature extractor config, let's try the model config. if feature_extractor_class is None and feature_extractor_auto_map is None: if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ): A = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) # It could be in `config.feature_extractor_type`` A = getattr(_SCREAMING_SNAKE_CASE ,'feature_extractor_type' ,_SCREAMING_SNAKE_CASE ) if hasattr(_SCREAMING_SNAKE_CASE ,'auto_map' ) and "AutoFeatureExtractor" in config.auto_map: A = config.auto_map['AutoFeatureExtractor'] if feature_extractor_class is not None: A = feature_extractor_class_from_name(_SCREAMING_SNAKE_CASE ) A = feature_extractor_auto_map is not None A = feature_extractor_class is not None or type(_SCREAMING_SNAKE_CASE ) in FEATURE_EXTRACTOR_MAPPING A = resolve_trust_remote_code( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) if has_remote_code and trust_remote_code: A = get_class_from_dynamic_module( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) A = kwargs.pop('code_revision' ,_SCREAMING_SNAKE_CASE ) if os.path.isdir(_SCREAMING_SNAKE_CASE ): feature_extractor_class.register_for_auto_class() return feature_extractor_class.from_dict(_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) 
elif feature_extractor_class is not None: return feature_extractor_class.from_dict(_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) # Last try: we use the FEATURE_EXTRACTOR_MAPPING. elif type(_SCREAMING_SNAKE_CASE ) in FEATURE_EXTRACTOR_MAPPING: A = FEATURE_EXTRACTOR_MAPPING[type(_SCREAMING_SNAKE_CASE )] return feature_extractor_class.from_dict(_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) raise ValueError( f'Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a ' f'`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following ' f'`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}' ) @staticmethod def A( _SCREAMING_SNAKE_CASE : Dict ,_SCREAMING_SNAKE_CASE : Tuple ) -> Any: '''simple docstring''' FEATURE_EXTRACTOR_MAPPING.register(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
110
def snake_case ( UpperCAmelCase : Optional[int], UpperCAmelCase : Union[str, Any] ): A = '' for i in table: res += inp[i - 1] return res def snake_case ( UpperCAmelCase : Union[str, Any] ): return data[1:] + data[0] def snake_case ( UpperCAmelCase : Union[str, Any], UpperCAmelCase : Dict ): A = '' for i in range(len(UpperCAmelCase ) ): if a[i] == b[i]: res += "0" else: res += "1" return res def snake_case ( UpperCAmelCase : int, UpperCAmelCase : Optional[Any] ): A = int('0b' + data[0] + data[-1], 2 ) A = int('0b' + data[1:3], 2 ) return bin(s[row][col] )[2:] def snake_case ( UpperCAmelCase : Optional[Any], UpperCAmelCase : Any, UpperCAmelCase : Union[str, Any], UpperCAmelCase : Optional[Any], UpperCAmelCase : Optional[int] ): A = message[:4] A = message[4:] A = apply_table(UpperCAmelCase, UpperCAmelCase ) A = xor(UpperCAmelCase, UpperCAmelCase ) A = apply_sbox(UpperCAmelCase, temp[:4] ) # noqa: E741 A = apply_sbox(UpperCAmelCase, temp[4:] ) A = '0' * (2 - len(UpperCAmelCase )) + l # noqa: E741 A = '0' * (2 - len(UpperCAmelCase )) + r A = apply_table(l + r, UpperCAmelCase ) A = xor(UpperCAmelCase, UpperCAmelCase ) return temp + right if __name__ == "__main__": lowerCAmelCase_ = input('Enter 10 bit key: ') lowerCAmelCase_ = input('Enter 8 bit message: ') lowerCAmelCase_ = [6, 3, 7, 4, 8, 5, 10, 9] lowerCAmelCase_ = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6] lowerCAmelCase_ = [2, 4, 3, 1] lowerCAmelCase_ = [2, 6, 3, 1, 4, 8, 5, 7] lowerCAmelCase_ = [4, 1, 3, 5, 7, 2, 8, 6] lowerCAmelCase_ = [4, 1, 2, 3, 2, 3, 4, 1] lowerCAmelCase_ = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]] lowerCAmelCase_ = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]] # key generation lowerCAmelCase_ = apply_table(key, paa_table) lowerCAmelCase_ = temp[:5] lowerCAmelCase_ = temp[5:] lowerCAmelCase_ = left_shift(left) lowerCAmelCase_ = left_shift(right) lowerCAmelCase_ = apply_table(left + right, pa_table) lowerCAmelCase_ = left_shift(left) lowerCAmelCase_ = left_shift(right) 
lowerCAmelCase_ = left_shift(left) lowerCAmelCase_ = left_shift(right) lowerCAmelCase_ = apply_table(left + right, pa_table) # encryption lowerCAmelCase_ = apply_table(message, IP) lowerCAmelCase_ = function(expansion, sa, sa, keya, temp) lowerCAmelCase_ = temp[4:] + temp[:4] lowerCAmelCase_ = function(expansion, sa, sa, keya, temp) lowerCAmelCase_ = apply_table(temp, IP_inv) print('Cipher text is:', CT) # decryption lowerCAmelCase_ = apply_table(CT, IP) lowerCAmelCase_ = function(expansion, sa, sa, keya, temp) lowerCAmelCase_ = temp[4:] + temp[:4] lowerCAmelCase_ = function(expansion, sa, sa, keya, temp) lowerCAmelCase_ = apply_table(temp, IP_inv) print('Plain text after decypting is:', PT)
110
1
"""Simulated annealing over a 2-D search problem.

The obfuscated original defined the optimizer and the demo objective
functions under mangled names while the call sites used the real names
(`simulated_annealing`, `test_f1`, `test_f2`), so nothing ran; the
canonical names are restored here.
"""
import math
import random
from typing import Any

from .hill_climbing import SearchProblem


def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """Anneal from *search_prob* and return the best state visited.

    *search_prob* must expose ``score()``, ``get_neighbors()`` and ``x``/``y``
    coordinates.  Worse neighbors are accepted with probability
    ``e**(change / current_temp)``; the temperature decays geometrically by
    *rate_of_decrease* each iteration and the search stops when it falls
    below *threshold_temp* or no acceptable neighbor exists.
    """
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score

            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds

            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                # probability generation function
                probability = (math.e) ** (change / current_temp)
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state


if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_max.score()}"
    )
268
"""Round-trip and parity tests for `CLIPProcessor`."""
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import CLIPImageProcessor, CLIPProcessor


@require_vision
class CLIPProcessorTest(unittest.TestCase):
    """Checks that CLIPProcessor saves/loads and matches its tokenizer and
    image processor component-for-component.

    NOTE(review): in the original, every method was named ``a``, so later
    definitions shadowed earlier ones and unittest never saw ``setUp``,
    ``tearDown`` or the individual tests; distinct names are restored here.
    """

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one PIL image built from a random CHW uint8 array."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        return [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
268
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available snake_case : int = { 'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'], 'tokenization_roc_bert': ['RoCBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: pass try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case : int = [ 'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'RoCBertForCausalLM', 'RoCBertForMaskedLM', 'RoCBertForMultipleChoice', 'RoCBertForPreTraining', 'RoCBertForQuestionAnswering', 'RoCBertForSequenceClassification', 'RoCBertForTokenClassification', 'RoCBertLayer', 'RoCBertModel', 'RoCBertPreTrainedModel', 'load_tf_weights_in_roc_bert', ] if TYPE_CHECKING: from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig from .tokenization_roc_bert import RoCBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: raise OptionalDependencyNotAvailable() try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roc_bert import ( ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, RoCBertForCausalLM, RoCBertForMaskedLM, RoCBertForMultipleChoice, RoCBertForPreTraining, RoCBertForQuestionAnswering, RoCBertForSequenceClassification, RoCBertForTokenClassification, RoCBertLayer, RoCBertModel, RoCBertPreTrainedModel, load_tf_weights_in_roc_bert, ) else: import sys snake_case : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
713
def snake_case__ ( __lowercase ) -> int: """simple docstring""" if divisor % 5 == 0 or divisor % 2 == 0: return 0 A__ : Tuple = 1 A__ : Union[str, Any] = 1 while repunit: A__ : Union[str, Any] = (1_0 * repunit + 1) % divisor repunit_index += 1 return repunit_index def snake_case__ ( __lowercase = 1_0_0_0_0_0_0 ) -> int: """simple docstring""" A__ : Dict = limit - 1 if divisor % 2 == 0: divisor += 1 while least_divisible_repunit(__lowercase ) <= limit: divisor += 2 return divisor if __name__ == "__main__": print(f"""{solution() = }""")
182
0
"""Tokenization tests for the OpenAI GPT (BPE) slow and fast tokenizers."""
import json
import os
import unittest

from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """NOTE(review): the original obfuscation gave every method the same
    name (``_a``) and used an undefined base class, so the mixin contract
    (``setUp``, ``get_input_output_texts``) was broken; canonical names
    are restored here.
    """

    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests: padding to max_length must fail without a pad token
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token; the generic mixin test does not apply
        pass


@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    """Same tests, run with ftfy/spacy-based text normalization available."""

    pass
267
'''simple docstring''' def lowerCamelCase ( _snake_case : int ,_snake_case : int ): '''simple docstring''' return "\n".join( f'''{number} * {i} = {number * i}''' for i in range(1 ,number_of_terms + 1 ) ) if __name__ == "__main__": print(multiplication_table(number=5, number_of_terms=10))
267
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available __lowerCamelCase : Dict = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Optional[Any] = ["BartphoTokenizer"] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bartpho import BartphoTokenizer else: import sys __lowerCamelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
721
from argparse import ArgumentParser from .env import EnvironmentCommand def lowerCamelCase_() -> Any: UpperCAmelCase = ArgumentParser("Diffusers CLI tool" , usage="diffusers-cli <command> [<args>]" ) UpperCAmelCase = parser.add_subparsers(help="diffusers-cli command helpers" ) # Register commands EnvironmentCommand.register_subcommand(lowerCamelCase_ ) # Let's go UpperCAmelCase = parser.parse_args() if not hasattr(lowerCamelCase_ , "func" ): parser.print_help() exit(1 ) # Run UpperCAmelCase = args.func(lowerCamelCase_ ) service.run() if __name__ == "__main__": main()
457
0
"""M-CTC-T model configuration.

The original inherited from an undefined name (`__a`) and garbled the
required ``model_type`` class attribute; the canonical
``PretrainedConfig`` base and attribute names are restored.
"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
    # See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}


class MCTCTConfig(PretrainedConfig):
    """Configuration for the M-CTC-T speech-recognition model.

    Stores transformer sizes, dropout/regularization rates, the front-end
    convolutional feature extractor settings, and the CTC loss options.
    Defaults reproduce ``speechbrain/m-ctc-t-large``.
    """

    model_type = "mctct"

    def __init__(
        self,
        vocab_size=8065,
        hidden_size=1536,
        num_hidden_layers=36,
        intermediate_size=6144,
        num_attention_heads=4,
        attention_head_dim=384,
        max_position_embeddings=920,
        layer_norm_eps=1e-5,
        layerdrop=0.3,
        hidden_act="relu",
        initializer_range=0.02,
        hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        conv_glu_dim=1,
        conv_dropout=0.3,
        num_conv_layers=1,
        conv_kernel=(7,),
        conv_stride=(3,),
        input_feat_per_channel=80,
        input_channels=1,
        conv_channels=None,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)

        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )
414
'''simple docstring''' import unittest from transformers import AutoTokenizer, FalconConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, ) class __lowerCAmelCase : def __init__(self , lowerCAmelCase__ , lowerCAmelCase__=3 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=9_9 , lowerCAmelCase__=3_2 , lowerCAmelCase__=5 , lowerCAmelCase__=4 , lowerCAmelCase__=3_7 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=5_1_2 , lowerCAmelCase__=1_6 , lowerCAmelCase__=2 , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=3 , lowerCAmelCase__=4 , lowerCAmelCase__=None , ): _UpperCAmelCase : List[Any] = parent _UpperCAmelCase : List[str] = batch_size _UpperCAmelCase : List[Any] = seq_length _UpperCAmelCase : int = is_training _UpperCAmelCase : Optional[int] = use_input_mask _UpperCAmelCase : Optional[Any] = use_token_type_ids _UpperCAmelCase : Tuple = use_labels _UpperCAmelCase : Optional[int] = vocab_size _UpperCAmelCase : Any = hidden_size _UpperCAmelCase : int = num_hidden_layers _UpperCAmelCase : Union[str, Any] = num_attention_heads _UpperCAmelCase : Optional[Any] = intermediate_size _UpperCAmelCase : Optional[int] = hidden_act _UpperCAmelCase : int = hidden_dropout_prob _UpperCAmelCase : Optional[int] = attention_probs_dropout_prob _UpperCAmelCase : List[str] = max_position_embeddings _UpperCAmelCase : str = type_vocab_size _UpperCAmelCase : Union[str, Any] = 
type_sequence_label_size _UpperCAmelCase : Optional[int] = initializer_range _UpperCAmelCase : Optional[Any] = num_labels _UpperCAmelCase : Optional[Any] = num_choices _UpperCAmelCase : Any = scope def snake_case_ (self ): _UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase : Dict = None if self.use_input_mask: _UpperCAmelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase : List[Any] = None _UpperCAmelCase : Any = None _UpperCAmelCase : str = None _UpperCAmelCase : Tuple = None if self.use_labels: _UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase : str = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase : List[Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def snake_case_ (self ): return FalconConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=lowerCAmelCase__ , ) def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCAmelCase : str = FalconModel(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() _UpperCAmelCase : Tuple = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ ) _UpperCAmelCase : Optional[int] = 
model(lowerCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ): _UpperCAmelCase : Union[str, Any] = True _UpperCAmelCase : Any = FalconModel(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() _UpperCAmelCase : Union[str, Any] = model( lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , ) _UpperCAmelCase : Any = model( lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , ) _UpperCAmelCase : List[Any] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ): _UpperCAmelCase : List[Any] = FalconForCausalLM(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() _UpperCAmelCase : Optional[int] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ): _UpperCAmelCase : Optional[int] = True _UpperCAmelCase : str = True _UpperCAmelCase : List[Any] = FalconForCausalLM(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() # first forward pass _UpperCAmelCase : int = model( lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , 
encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , use_cache=lowerCAmelCase__ , ) _UpperCAmelCase : Any = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids _UpperCAmelCase : List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) _UpperCAmelCase : Optional[int] = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and _UpperCAmelCase : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) _UpperCAmelCase : Tuple = torch.cat([input_mask, next_mask] , dim=-1 ) _UpperCAmelCase : Tuple = model( lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , )["""hidden_states"""][0] _UpperCAmelCase : Union[str, Any] = model( lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , )["""hidden_states"""][0] # select random slice _UpperCAmelCase : List[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item() _UpperCAmelCase : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx].detach() _UpperCAmelCase : str = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-3 ) ) def snake_case_ (self ): _UpperCAmelCase : List[str] = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) : Tuple = config_and_inputs _UpperCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class 
__lowerCAmelCase ( __a , __a , __a , unittest.TestCase ): snake_case : Optional[Any] = ( ( FalconModel, FalconForCausalLM, FalconForSequenceClassification, FalconForTokenClassification, FalconForQuestionAnswering, ) if is_torch_available() else () ) snake_case : Dict = (FalconForCausalLM,) if is_torch_available() else () snake_case : Dict = ( { """feature-extraction""": FalconModel, """text-classification""": FalconForSequenceClassification, """text-generation""": FalconForCausalLM, """question-answering""": FalconForQuestionAnswering, """token-classification""": FalconForTokenClassification, """zero-shot""": FalconForSequenceClassification, } if is_torch_available() else {} ) snake_case : Optional[Any] = False snake_case : Any = False def snake_case_ (self ): _UpperCAmelCase : Dict = FalconModelTester(self ) _UpperCAmelCase : Any = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=3_7 ) def snake_case_ (self ): self.config_tester.run_common_tests() def snake_case_ (self ): _UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase__ ) def snake_case_ (self ): _UpperCAmelCase , *_UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() for alibi in [True, False]: _UpperCAmelCase : Dict = alibi self.model_tester.create_and_check_model(lowerCAmelCase__ , *lowerCAmelCase__ ) def snake_case_ (self ): _UpperCAmelCase , _UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase : Tuple = 3 _UpperCAmelCase : Tuple = input_dict["""input_ids"""] _UpperCAmelCase : List[str] = input_ids.ne(1 ).to(lowerCAmelCase__ ) _UpperCAmelCase : Union[str, Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) _UpperCAmelCase : int = FalconForSequenceClassification(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() _UpperCAmelCase : List[Any] = model(lowerCAmelCase__ , 
attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def snake_case_ (self ): _UpperCAmelCase , _UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase : Dict = 3 _UpperCAmelCase : int = """single_label_classification""" _UpperCAmelCase : int = input_dict["""input_ids"""] _UpperCAmelCase : Tuple = input_ids.ne(1 ).to(lowerCAmelCase__ ) _UpperCAmelCase : Any = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) _UpperCAmelCase : List[str] = FalconForSequenceClassification(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() _UpperCAmelCase : str = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def snake_case_ (self ): _UpperCAmelCase , _UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase : List[str] = input_dict["""input_ids"""] _UpperCAmelCase : str = FalconForCausalLM(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() _UpperCAmelCase : List[Any] = model(lowerCAmelCase__ , use_cache=lowerCAmelCase__ ) _UpperCAmelCase : Optional[Any] = input_ids.shape[0] _UpperCAmelCase : int = model._convert_to_rw_cache(result.past_key_values ) _UpperCAmelCase : List[str] = model._convert_cache_to_standard_format(lowerCAmelCase__ , lowerCAmelCase__ ) for layer in range(len(lowerCAmelCase__ ) ): for tensor_idx in range(2 ): self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 ) self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 ) self.assertTrue( torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) ) def snake_case_ (self ): _UpperCAmelCase , _UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase : Tuple 
= 3 _UpperCAmelCase : List[Any] = """multi_label_classification""" _UpperCAmelCase : List[str] = input_dict["""input_ids"""] _UpperCAmelCase : Tuple = input_ids.ne(1 ).to(lowerCAmelCase__ ) _UpperCAmelCase : str = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) _UpperCAmelCase : Optional[Any] = FalconForSequenceClassification(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() _UpperCAmelCase : Union[str, Any] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def snake_case_ (self ): # Falcon can have different numbers of KV-heads than the number of query heads, so we need # to override this test to use the right head counts. for model_class in self.all_generative_model_classes: _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() # If it doesn't support cache, pass the test if not hasattr(lowerCAmelCase__ , """use_cache""" ): return _UpperCAmelCase : Dict = model_class(lowerCAmelCase__ ).to(lowerCAmelCase__ ) if "use_cache" not in inputs: _UpperCAmelCase : Union[str, Any] = True _UpperCAmelCase : int = model(**lowerCAmelCase__ ) # If "past_key_values" is not returned, pass the test (e.g. 
RWKV uses a different cache name and format) if "past_key_values" not in outputs: return _UpperCAmelCase : Tuple = ( getattr(lowerCAmelCase__ , """decoder_layers""" , lowerCAmelCase__ ) or getattr(lowerCAmelCase__ , """num_decoder_layers""" , lowerCAmelCase__ ) or config.num_hidden_layers ) _UpperCAmelCase : int = getattr(lowerCAmelCase__ , """num_kv_heads""" , config.num_attention_heads ) _UpperCAmelCase : List[str] = getattr(lowerCAmelCase__ , """d_model""" , config.hidden_size ) _UpperCAmelCase : Dict = embed_dim // num_attention_heads _UpperCAmelCase : Any = outputs["""past_key_values"""] self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ ) _UpperCAmelCase , _UpperCAmelCase : Dict = inputs["""input_ids"""].shape for i in range(lowerCAmelCase__ ): if config.new_decoder_architecture: _UpperCAmelCase : str = config.num_attention_heads elif config.multi_query: _UpperCAmelCase : Optional[Any] = 1 self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2 self.assertEqual( past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) self.assertEqual( past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) @require_torch class __lowerCAmelCase ( unittest.TestCase ): @slow def snake_case_ (self ): _UpperCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained("""Rocketknight1/falcon-rw-1b""" ) _UpperCAmelCase : str = FalconForCausalLM.from_pretrained("""Rocketknight1/falcon-rw-1b""" ) model.eval() model.to(lowerCAmelCase__ ) _UpperCAmelCase : Union[str, Any] = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(lowerCAmelCase__ ) _UpperCAmelCase : Any = ( """My favorite food is pizza. 
I love it so much that I have a pizza party every year for my birthday.""" ) _UpperCAmelCase : Optional[int] = model.generate(**lowerCAmelCase__ , do_sample=lowerCAmelCase__ , max_new_tokens=1_9 ) _UpperCAmelCase : Optional[int] = tokenizer.batch_decode(lowerCAmelCase__ )[0] self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ ) @slow def snake_case_ (self ): # The big models are way too big for the CI, so we use tiny random models that resemble their # architectures but with much smaller and fewer layers for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]: _UpperCAmelCase : int = AutoTokenizer.from_pretrained(lowerCAmelCase__ ) _UpperCAmelCase : Optional[int] = FalconForCausalLM.from_pretrained(lowerCAmelCase__ ) model.eval() model.to(lowerCAmelCase__ ) _UpperCAmelCase : Any = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(lowerCAmelCase__ ) # We just test that these run without errors - the models are randomly initialized # and so the actual text outputs will be garbage model.generate(**lowerCAmelCase__ , do_sample=lowerCAmelCase__ , max_new_tokens=4 ) model.generate(**lowerCAmelCase__ , do_sample=lowerCAmelCase__ , max_new_tokens=4 ) model.generate(**lowerCAmelCase__ , num_beams=2 , max_new_tokens=4 ) @slow def snake_case_ (self ): # The big models are way too big for the CI, so we use tiny random models that resemble their # architectures but with much smaller and fewer layers with torch.no_grad(): for repo in [ "Rocketknight1/falcon-rw-1b", "Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b", ]: _UpperCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(lowerCAmelCase__ ) _UpperCAmelCase : Optional[int] = FalconForCausalLM.from_pretrained(lowerCAmelCase__ ) model.eval() model.to(device=lowerCAmelCase__ ) _UpperCAmelCase : str = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(lowerCAmelCase__ ) # Test results are the same with and without cache 
_UpperCAmelCase : Optional[int] = model.generate(**lowerCAmelCase__ , do_sample=lowerCAmelCase__ , max_new_tokens=2_0 , use_cache=lowerCAmelCase__ ) _UpperCAmelCase : Optional[Any] = model.generate(**lowerCAmelCase__ , do_sample=lowerCAmelCase__ , max_new_tokens=2_0 , use_cache=lowerCAmelCase__ ) self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
414
1
"""simple docstring""" from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging _UpperCAmelCase = logging.get_logger(__name__) _UpperCAmelCase = { """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""", # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class a ( UpperCAmelCase__ ): UpperCamelCase : Tuple = 'blenderbot-small' UpperCamelCase : Union[str, Any] = ['past_key_values'] UpperCamelCase : List[Any] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self : List[Any] , lowerCAmelCase : Dict=5_0265 , lowerCAmelCase : Dict=512 , lowerCAmelCase : Any=8 , lowerCAmelCase : str=2048 , lowerCAmelCase : Union[str, Any]=16 , lowerCAmelCase : List[str]=8 , lowerCAmelCase : str=2048 , lowerCAmelCase : Optional[int]=16 , lowerCAmelCase : List[Any]=0.0 , lowerCAmelCase : int=0.0 , lowerCAmelCase : Optional[int]=True , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : int="gelu" , lowerCAmelCase : Union[str, Any]=512 , lowerCAmelCase : Any=0.1 , lowerCAmelCase : List[Any]=0.0 , lowerCAmelCase : Union[str, Any]=0.0 , lowerCAmelCase : List[Any]=0.0_2 , lowerCAmelCase : Tuple=1 , lowerCAmelCase : str=False , lowerCAmelCase : str=0 , lowerCAmelCase : int=1 , lowerCAmelCase : Any=2 , lowerCAmelCase : int=2 , **lowerCAmelCase : Tuple , ) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE_: Union[str, Any] =vocab_size SCREAMING_SNAKE_CASE_: Any =max_position_embeddings SCREAMING_SNAKE_CASE_: Any =d_model SCREAMING_SNAKE_CASE_: int =encoder_ffn_dim SCREAMING_SNAKE_CASE_: Optional[int] =encoder_layers 
SCREAMING_SNAKE_CASE_: Any =encoder_attention_heads SCREAMING_SNAKE_CASE_: Optional[Any] =decoder_ffn_dim SCREAMING_SNAKE_CASE_: Optional[int] =decoder_layers SCREAMING_SNAKE_CASE_: List[str] =decoder_attention_heads SCREAMING_SNAKE_CASE_: List[Any] =dropout SCREAMING_SNAKE_CASE_: Dict =attention_dropout SCREAMING_SNAKE_CASE_: Optional[Any] =activation_dropout SCREAMING_SNAKE_CASE_: Union[str, Any] =activation_function SCREAMING_SNAKE_CASE_: Any =init_std SCREAMING_SNAKE_CASE_: List[str] =encoder_layerdrop SCREAMING_SNAKE_CASE_: Optional[Any] =decoder_layerdrop SCREAMING_SNAKE_CASE_: Optional[Any] =use_cache SCREAMING_SNAKE_CASE_: int =encoder_layers SCREAMING_SNAKE_CASE_: str =scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , is_encoder_decoder=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , forced_eos_token_id=lowerCAmelCase , **lowerCAmelCase , ) class a ( UpperCAmelCase__ ): @property def lowerCamelCase__ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: SCREAMING_SNAKE_CASE_: Tuple =OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ] ) if self.use_past: SCREAMING_SNAKE_CASE_: List[Any] ={0: """batch"""} SCREAMING_SNAKE_CASE_: Dict ={0: """batch""", 1: """past_decoder_sequence + sequence"""} else: SCREAMING_SNAKE_CASE_: Optional[int] ={0: """batch""", 1: """decoder_sequence"""} SCREAMING_SNAKE_CASE_: Optional[int] ={0: """batch""", 1: """decoder_sequence"""} if self.use_past: self.fill_with_past_key_values_(lowerCAmelCase , direction="""inputs""" ) elif self.task == "causal-lm": # TODO: figure this case out. 
SCREAMING_SNAKE_CASE_: Union[str, Any] =OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ] ) if self.use_past: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict =self.num_layers for i in range(lowerCAmelCase ): SCREAMING_SNAKE_CASE_: Dict ={0: """batch""", 2: """past_sequence + sequence"""} SCREAMING_SNAKE_CASE_: Union[str, Any] ={0: """batch""", 2: """past_sequence + sequence"""} else: SCREAMING_SNAKE_CASE_: int =OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}), ("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}), ] ) return common_inputs @property def lowerCamelCase__ ( self : Dict ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: SCREAMING_SNAKE_CASE_: Tuple =super().outputs else: SCREAMING_SNAKE_CASE_: Optional[Any] =super(lowerCAmelCase , self ).outputs if self.use_past: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int =self.num_layers for i in range(lowerCAmelCase ): SCREAMING_SNAKE_CASE_: List[Any] ={0: """batch""", 2: """past_sequence + sequence"""} SCREAMING_SNAKE_CASE_: List[Any] ={0: """batch""", 2: """past_sequence + sequence"""} return common_outputs def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : PreTrainedTokenizer , lowerCAmelCase : int = -1 , lowerCAmelCase : int = -1 , lowerCAmelCase : bool = False , lowerCAmelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE_: Optional[int] =self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) # Generate decoder inputs SCREAMING_SNAKE_CASE_: List[str] =seq_length if not self.use_past else 1 
SCREAMING_SNAKE_CASE_: Union[str, Any] =self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) SCREAMING_SNAKE_CASE_: Union[str, Any] ={f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()} SCREAMING_SNAKE_CASE_: Union[str, Any] =dict(**lowerCAmelCase , **lowerCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple =common_inputs["""input_ids"""].shape SCREAMING_SNAKE_CASE_: Optional[Any] =common_inputs["""decoder_input_ids"""].shape[1] SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any =self.num_attention_heads SCREAMING_SNAKE_CASE_: Optional[int] =( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) SCREAMING_SNAKE_CASE_: int =decoder_seq_length + 3 SCREAMING_SNAKE_CASE_: str =( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) SCREAMING_SNAKE_CASE_: List[str] =torch.cat( [common_inputs["""decoder_attention_mask"""], torch.ones(lowerCAmelCase , lowerCAmelCase )] , dim=1 ) SCREAMING_SNAKE_CASE_: Any =[] # If the number of encoder and decoder layers are present in the model configuration, both are considered SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int =self.num_layers SCREAMING_SNAKE_CASE_: Dict =min(lowerCAmelCase , lowerCAmelCase ) SCREAMING_SNAKE_CASE_: Tuple =max(lowerCAmelCase , lowerCAmelCase ) - min_num_layers SCREAMING_SNAKE_CASE_: Optional[int] ="""encoder""" if num_encoder_layers > num_decoder_layers else """decoder""" for _ in range(lowerCAmelCase ): common_inputs["past_key_values"].append( ( torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase ), ) ) # TODO: test 
this. SCREAMING_SNAKE_CASE_: List[Any] =encoder_shape if remaining_side_name == """encoder""" else decoder_shape for _ in range(lowerCAmelCase , lowerCAmelCase ): common_inputs["past_key_values"].append((torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) ) return common_inputs def lowerCamelCase__ ( self : Any , lowerCAmelCase : PreTrainedTokenizer , lowerCAmelCase : int = -1 , lowerCAmelCase : int = -1 , lowerCAmelCase : bool = False , lowerCAmelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE_: Optional[int] =self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =common_inputs["""input_ids"""].shape # Not using the same length for past_key_values SCREAMING_SNAKE_CASE_: List[Any] =seqlen + 2 SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] =self.num_layers SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] =self.num_attention_heads SCREAMING_SNAKE_CASE_: List[str] =( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) SCREAMING_SNAKE_CASE_: Optional[int] =common_inputs["""attention_mask"""].dtype SCREAMING_SNAKE_CASE_: List[Any] =torch.cat( [common_inputs["""attention_mask"""], torch.ones(lowerCAmelCase , lowerCAmelCase , dtype=lowerCAmelCase )] , dim=1 ) SCREAMING_SNAKE_CASE_: List[str] =[ (torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) for _ in range(lowerCAmelCase ) ] return common_inputs def lowerCamelCase__ ( self : Any , lowerCAmelCase : PreTrainedTokenizer , lowerCAmelCase : int = -1 , lowerCAmelCase : int = -1 , lowerCAmelCase : bool = False , lowerCAmelCase : 
Optional[TensorType] = None , ) -> Mapping[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE_: Tuple =compute_effective_axis_dimension( lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX SCREAMING_SNAKE_CASE_: str =tokenizer.num_special_tokens_to_add(lowerCAmelCase ) SCREAMING_SNAKE_CASE_: str =compute_effective_axis_dimension( lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCAmelCase ) # Generate dummy inputs according to compute batch and sequence SCREAMING_SNAKE_CASE_: Dict =[""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size SCREAMING_SNAKE_CASE_: Tuple =dict(tokenizer(lowerCAmelCase , return_tensors=lowerCAmelCase ) ) return common_inputs def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : PreTrainedTokenizer , lowerCAmelCase : int = -1 , lowerCAmelCase : int = -1 , lowerCAmelCase : bool = False , lowerCAmelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]: '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: SCREAMING_SNAKE_CASE_: Dict =self._generate_dummy_inputs_for_default_and_seqaseq_lm( lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase ) elif self.task == "causal-lm": SCREAMING_SNAKE_CASE_: int =self._generate_dummy_inputs_for_causal_lm( lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase ) else: SCREAMING_SNAKE_CASE_: Tuple =self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase ) return common_inputs def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : Dict , lowerCAmelCase : Any , lowerCAmelCase : Dict , lowerCAmelCase : 
List[Any] ) -> int: '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: SCREAMING_SNAKE_CASE_: Tuple =super()._flatten_past_key_values_(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) else: SCREAMING_SNAKE_CASE_: List[str] =super(lowerCAmelCase , self )._flatten_past_key_values_( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
36
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig _UpperCAmelCase = { """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""", """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""", """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""", """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""", """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""", """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""", """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""", """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""", } class a ( UpperCAmelCase__ ): UpperCamelCase : Any = 'albert' def __init__( self : Dict , lowerCAmelCase : List[str]=3_0000 , lowerCAmelCase : List[Any]=128 , lowerCAmelCase : List[str]=4096 , lowerCAmelCase : str=12 , lowerCAmelCase : str=1 , lowerCAmelCase : Tuple=64 , lowerCAmelCase : Dict=1_6384 , lowerCAmelCase : int=1 , lowerCAmelCase : str="gelu_new" , lowerCAmelCase : Dict=0 , lowerCAmelCase : Optional[Any]=0 , lowerCAmelCase : str=512 , lowerCAmelCase : Optional[int]=2 , lowerCAmelCase : List[Any]=0.0_2 , lowerCAmelCase : Union[str, Any]=1E-12 , lowerCAmelCase : Tuple=0.1 , lowerCAmelCase : List[Any]="absolute" , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : int=2 , lowerCAmelCase : Optional[int]=3 , **lowerCAmelCase : int , ) -> Tuple: '''simple docstring''' super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase ) SCREAMING_SNAKE_CASE_: List[str] =vocab_size SCREAMING_SNAKE_CASE_: Optional[int] =embedding_size SCREAMING_SNAKE_CASE_: Optional[int] 
=hidden_size SCREAMING_SNAKE_CASE_: Tuple =num_hidden_layers SCREAMING_SNAKE_CASE_: Any =num_hidden_groups SCREAMING_SNAKE_CASE_: List[Any] =num_attention_heads SCREAMING_SNAKE_CASE_: List[Any] =inner_group_num SCREAMING_SNAKE_CASE_: Optional[int] =hidden_act SCREAMING_SNAKE_CASE_: int =intermediate_size SCREAMING_SNAKE_CASE_: Any =hidden_dropout_prob SCREAMING_SNAKE_CASE_: Union[str, Any] =attention_probs_dropout_prob SCREAMING_SNAKE_CASE_: int =max_position_embeddings SCREAMING_SNAKE_CASE_: Any =type_vocab_size SCREAMING_SNAKE_CASE_: int =initializer_range SCREAMING_SNAKE_CASE_: List[Any] =layer_norm_eps SCREAMING_SNAKE_CASE_: Dict =classifier_dropout_prob SCREAMING_SNAKE_CASE_: int =position_embedding_type class a ( UpperCAmelCase__ ): @property def lowerCamelCase__ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task == "multiple-choice": SCREAMING_SNAKE_CASE_: str ={0: """batch""", 1: """choice""", 2: """sequence"""} else: SCREAMING_SNAKE_CASE_: Dict ={0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis), ] )
36
1
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # and perform gradient accumulation # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## SCREAMING_SNAKE_CASE : int = 16 SCREAMING_SNAKE_CASE : Optional[Any] = 32 def __A ( _A , _A = 16 ): """simple docstring""" __a = AutoTokenizer.from_pretrained("bert-base-cased" ) __a = load_dataset("glue" , "mrpc" ) def tokenize_function(_A ): # max_length=None => use the model max length (it's actually the default) __a = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_A , max_length=_A ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __a = datasets.map( _A , batched=_A , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __a = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(_A ): # On TPU it's best to pad everything to the same length or training will be very slow. 
__a = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __a = 16 elif accelerator.mixed_precision != "no": __a = 8 else: __a = None return tokenizer.pad( _A , padding="longest" , max_length=_A , pad_to_multiple_of=_A , return_tensors="pt" , ) # Instantiate dataloaders. __a = DataLoader( tokenized_datasets["train"] , shuffle=_A , collate_fn=_A , batch_size=_A ) __a = DataLoader( tokenized_datasets["validation"] , shuffle=_A , collate_fn=_A , batch_size=_A ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders SCREAMING_SNAKE_CASE : Dict = mocked_dataloaders # noqa: F811 def __A ( _A , _A ): """simple docstring""" if os.environ.get("TESTING_MOCKED_DATALOADERS" , _A ) == "1": __a = 2 # New Code # __a = int(args.gradient_accumulation_steps ) # Initialize accelerator __a = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=_A ) if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1: raise NotImplementedError( "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`" ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __a = config["lr"] __a = int(config["num_epochs"] ) __a = int(config["seed"] ) __a = int(config["batch_size"] ) __a = evaluate.load("glue" , "mrpc" ) set_seed(_A ) __a , __a = get_dataloaders(_A , _A ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __a = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=_A ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). 
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __a = model.to(accelerator.device ) # Instantiate optimizer __a = AdamW(params=model.parameters() , lr=_A ) # Instantiate scheduler __a = get_linear_schedule_with_warmup( optimizer=_A , num_warmup_steps=100 , num_training_steps=(len(_A ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __a , __a , __a , __a , __a = accelerator.prepare( _A , _A , _A , _A , _A ) # Now we train the model for epoch in range(_A ): model.train() for step, batch in enumerate(_A ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. with accelerator.accumulate(_A ): __a = model(**_A ) __a = output.loss accelerator.backward(_A ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(_A ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __a = model(**_A ) __a = outputs.logits.argmax(dim=-1 ) __a , __a = accelerator.gather_for_metrics((predictions, batch["labels"]) ) metric.add_batch( predictions=_A , references=_A , ) __a = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"""epoch {epoch}:""" , _A ) def __A ( ): """simple docstring""" __a = argparse.ArgumentParser(description="Simple example of training script." 
) parser.add_argument( "--mixed_precision" , type=_A , default=_A , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." , ) # New Code # parser.add_argument( "--gradient_accumulation_steps" , type=_A , default=1 , help="The number of minibatches to be ran before gradients are accumulated." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) __a = parser.parse_args() __a = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(_A , _A ) if __name__ == "__main__": main()
197
'''simple docstring''' from __future__ import annotations from scipy.special import comb # type: ignore class _SCREAMING_SNAKE_CASE : def __init__( self : Optional[Any] , a__ : list[tuple[float, float]] ): __magic_name__ = list_of_points # Degree determines the flexibility of the curve. # Degree = 1 will produce a straight line. __magic_name__ = len(a__ ) - 1 def snake_case__ ( self : List[str] , a__ : float ): assert 0 <= t <= 1, "Time t must be between 0 and 1." __magic_name__ = [] for i in range(len(self.list_of_points ) ): # basis function for each i output_values.append( comb(self.degree , a__ ) * ((1 - t) ** (self.degree - i)) * (t**i) ) # the basis must sum up to 1 for it to produce a valid Bezier curve. assert round(sum(a__ ) , 5 ) == 1 return output_values def snake_case__ ( self : Optional[Any] , a__ : float ): assert 0 <= t <= 1, "Time t must be between 0 and 1." __magic_name__ = self.basis_function(a__ ) __magic_name__ = 0.0 __magic_name__ = 0.0 for i in range(len(self.list_of_points ) ): # For all points, sum up the product of i-th basis function and i-th point. 
x += basis_function[i] * self.list_of_points[i][0] y += basis_function[i] * self.list_of_points[i][1] return (x, y) def snake_case__ ( self : Optional[int] , a__ : float = 0.01 ): from matplotlib import pyplot as plt # type: ignore __magic_name__ = [] # x coordinates of points to plot __magic_name__ = [] # y coordinates of points to plot __magic_name__ = 0.0 while t <= 1: __magic_name__ = self.bezier_curve_function(a__ ) to_plot_x.append(value[0] ) to_plot_y.append(value[1] ) t += step_size __magic_name__ = [i[0] for i in self.list_of_points] __magic_name__ = [i[1] for i in self.list_of_points] plt.plot( a__ , a__ , color='''blue''' , label='''Curve of Degree ''' + str(self.degree ) , ) plt.scatter(a__ , a__ , color='''red''' , label='''Control Points''' ) plt.legend() plt.show() if __name__ == "__main__": import doctest doctest.testmod() BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1 BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2 BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
432
0
from dataclasses import dataclass, field from typing import Optional from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser @dataclass class SCREAMING_SNAKE_CASE : _UpperCamelCase : Dict = field( metadata={'help': 'The output directory where the model will be written.'} , ) _UpperCamelCase : str = field( metadata={ 'help': ( 'The encoder model checkpoint for weights initialization.' 'Don\'t set if you want to train an encoder model from scratch.' ) } , ) _UpperCamelCase : Any = field( metadata={ 'help': ( 'The decoder model checkpoint for weights initialization.' 'Don\'t set if you want to train a decoder model from scratch.' ) } , ) _UpperCamelCase : Dict = field( default=UpperCamelCase_ , metadata={'help': 'Pretrained encoder config name or path if not the same as encoder_model_name'} ) _UpperCamelCase : Dict = field( default=UpperCamelCase_ , metadata={'help': 'Pretrained decoder config name or path if not the same as decoder_model_name'} ) def __UpperCamelCase () -> Optional[Any]: lowercase__ = HfArgumentParser((ModelArguments,) ) (lowercase__ ) = parser.parse_args_into_dataclasses() # Load pretrained model and tokenizer # Use explicit specified encoder config if model_args.encoder_config_name: lowercase__ = AutoConfig.from_pretrained(model_args.encoder_config_name ) # Use pretrained encoder model's config else: lowercase__ = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path ) # Use explicit specified decoder config if model_args.decoder_config_name: lowercase__ = AutoConfig.from_pretrained(model_args.decoder_config_name ) # Use pretrained decoder model's config else: lowercase__ = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path ) # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed lowercase__ = True lowercase__ = True lowercase__ = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained( 
encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=_lowercase , decoder_config=_lowercase , ) # GPT2 only has bos/eos tokens but not decoder_start/pad tokens lowercase__ = decoder_config.decoder_start_token_id lowercase__ = decoder_config.pad_token_id if decoder_start_token_id is None: lowercase__ = decoder_config.bos_token_id if pad_token_id is None: lowercase__ = decoder_config.eos_token_id # This is necessary to make Flax's generate() work lowercase__ = decoder_config.eos_token_id lowercase__ = decoder_start_token_id lowercase__ = pad_token_id lowercase__ = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path ) lowercase__ = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path ) lowercase__ = tokenizer.convert_ids_to_tokens(model.config.pad_token_id ) model.save_pretrained(model_args.output_dir ) image_processor.save_pretrained(model_args.output_dir ) tokenizer.save_pretrained(model_args.output_dir ) if __name__ == "__main__": main()
712
import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class SCREAMING_SNAKE_CASE (unittest.TestCase ): def __init__( self : Any , a : str , a : List[Any]=7 , a : int=3 , a : int=18 , a : Optional[Any]=30 , a : Optional[int]=400 , a : int=True , a : Tuple=None , a : Optional[Any]=True , a : str=False , a : str=True , a : int=True , a : Tuple=[0.5, 0.5, 0.5] , a : Any=[0.5, 0.5, 0.5] , )-> Optional[int]: """simple docstring""" lowercase__ = parent lowercase__ = batch_size lowercase__ = num_channels lowercase__ = image_size lowercase__ = min_resolution lowercase__ = max_resolution lowercase__ = do_resize lowercase__ = size if size is not None else {'height': 18, 'width': 20} lowercase__ = do_thumbnail lowercase__ = do_align_axis lowercase__ = do_pad lowercase__ = do_normalize lowercase__ = image_mean lowercase__ = image_std def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Tuple: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class SCREAMING_SNAKE_CASE (UpperCAmelCase , unittest.TestCase ): _UpperCamelCase : Optional[Any] = DonutImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE_ ( self : int )-> List[Any]: """simple docstring""" lowercase__ = DonutImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Optional[int]: """simple docstring""" return 
self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE_ ( self : Any )-> int: """simple docstring""" lowercase__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a , 'do_resize' ) ) self.assertTrue(hasattr(a , 'size' ) ) self.assertTrue(hasattr(a , 'do_thumbnail' ) ) self.assertTrue(hasattr(a , 'do_align_long_axis' ) ) self.assertTrue(hasattr(a , 'do_pad' ) ) self.assertTrue(hasattr(a , 'do_normalize' ) ) self.assertTrue(hasattr(a , 'image_mean' ) ) self.assertTrue(hasattr(a , 'image_std' ) ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Dict: """simple docstring""" lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 18, 'width': 20} ) lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'height': 42, 'width': 42} ) # Previous config had dimensions in (width, height) order lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) ) self.assertEqual(image_processor.size , {'height': 84, 'width': 42} ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Dict: """simple docstring""" pass @is_flaky() def SCREAMING_SNAKE_CASE_ ( self : str )-> Optional[int]: """simple docstring""" lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a ) for image in image_inputs: self.assertIsInstance(a , Image.Image ) # Test not batched input lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values 
self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Tuple: """simple docstring""" lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a ) for image in image_inputs: self.assertIsInstance(a , np.ndarray ) # Test not batched input lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Dict: """simple docstring""" lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a ) for image in image_inputs: self.assertIsInstance(a , torch.Tensor ) # Test not batched input lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, 
self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , )
45
0
"""simple docstring""" import math from numpy import inf from scipy.integrate import quad def lowerCAmelCase__ ( __magic_name__ ) ->float: if num <= 0: raise ValueError("math domain error" ) return quad(__magic_name__ , 0 , __magic_name__ , args=(__magic_name__) )[0] def lowerCAmelCase__ ( __magic_name__ , __magic_name__ ) ->float: return math.pow(__magic_name__ , z - 1 ) * math.exp(-x ) if __name__ == "__main__": from doctest import testmod testmod()
118
"""simple docstring""" import os import re from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''vocab_file''': '''vocab.txt''', '''merges_file''': '''bpe.codes''', } _lowercase = { '''vocab_file''': { '''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt''', '''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt''', }, '''merges_file''': { '''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes''', '''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes''', }, } _lowercase = { '''vinai/phobert-base''': 256, '''vinai/phobert-large''': 256, } def lowerCAmelCase__ ( __magic_name__ ) ->Optional[int]: __lowercase = set() __lowercase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __lowercase = char __lowercase = set(__magic_name__ ) return pairs class __a ( __a ): '''simple docstring''' _lowerCamelCase : int = VOCAB_FILES_NAMES _lowerCamelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<pad>" , _lowerCamelCase="<mask>" , **_lowerCamelCase , ) -> Tuple: '''simple docstring''' super().__init__( bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , **_lowerCamelCase , ) __lowercase = vocab_file __lowercase = merges_file __lowercase = {} __lowercase = 0 __lowercase = 1 __lowercase = 2 __lowercase = 3 self.add_from_file(_lowerCamelCase ) 
__lowercase = {v: k for k, v in self.encoder.items()} with open(_lowerCamelCase , encoding="utf-8" ) as merges_handle: __lowercase = merges_handle.read().split("\n" )[:-1] __lowercase = [tuple(merge.split()[:-1] ) for merge in merges] __lowercase = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) ) __lowercase = {} def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase , _lowerCamelCase = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __lowercase = [self.cls_token_id] __lowercase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(_lowerCamelCase )) + [1] return [1] + ([0] * len(_lowerCamelCase )) + [1, 1] + ([0] * len(_lowerCamelCase )) + [1] def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase , _lowerCamelCase = None ) -> List[int]: '''simple docstring''' __lowercase = [self.sep_token_id] __lowercase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: '''simple docstring''' return len(self.encoder ) def SCREAMING_SNAKE_CASE ( self ) -> str: '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase ) -> List[Any]: '''simple docstring''' if token in self.cache: return self.cache[token] __lowercase = tuple(_lowerCamelCase ) __lowercase = tuple(list(word[:-1] ) + [word[-1] + "</w>"] ) __lowercase = get_pairs(_lowerCamelCase ) if not pairs: return token while 
True: __lowercase = min(_lowerCamelCase , key=lambda _lowerCamelCase : self.bpe_ranks.get(_lowerCamelCase , float("inf" ) ) ) if bigram not in self.bpe_ranks: break __lowercase , __lowercase = bigram __lowercase = [] __lowercase = 0 while i < len(_lowerCamelCase ): try: __lowercase = word.index(_lowerCamelCase , _lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __lowercase = j if word[i] == first and i < len(_lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __lowercase = tuple(_lowerCamelCase ) __lowercase = new_word if len(_lowerCamelCase ) == 1: break else: __lowercase = get_pairs(_lowerCamelCase ) __lowercase = "@@ ".join(_lowerCamelCase ) __lowercase = word[:-4] __lowercase = word return word def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase ) -> str: '''simple docstring''' __lowercase = [] __lowercase = re.findall(R"\S+\n?" , _lowerCamelCase ) for token in words: split_tokens.extend(list(self.bpe(_lowerCamelCase ).split(" " ) ) ) return split_tokens def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' return self.encoder.get(_lowerCamelCase , self.encoder.get(self.unk_token ) ) def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase ) -> Optional[int]: '''simple docstring''' return self.decoder.get(_lowerCamelCase , self.unk_token ) def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase ) -> Tuple: '''simple docstring''' __lowercase = " ".join(_lowerCamelCase ).replace("@@ " , "" ).strip() return out_string def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase , _lowerCamelCase = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(_lowerCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __lowercase = os.path.join( _lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) __lowercase = os.path.join( 
_lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ): copyfile(self.vocab_file , _lowerCamelCase ) if os.path.abspath(self.merges_file ) != os.path.abspath(_lowerCamelCase ): copyfile(self.merges_file , _lowerCamelCase ) return out_vocab_file, out_merge_file def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase ) -> Dict: '''simple docstring''' if isinstance(_lowerCamelCase , _lowerCamelCase ): try: with open(_lowerCamelCase , "r" , encoding="utf-8" ) as fd: self.add_from_file(_lowerCamelCase ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception(f'''Incorrect encoding detected in {f}, please rebuild the dataset''' ) return __lowercase = f.readlines() for lineTmp in lines: __lowercase = lineTmp.strip() __lowercase = line.rfind(" " ) if idx == -1: raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'" ) __lowercase = line[:idx] __lowercase = len(self.encoder )
118
1
"""Project Euler 65: digit sum of the numerator of the max_n-th convergent of e."""


def _sum_digits(num: int) -> int:
    """Return the sum of the decimal digits of a non-negative integer."""
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def UpperCamelCase(max_n: int = 100) -> int:
    """Return the digit sum of the numerator of the `max_n`-th convergent of e.

    e has the continued fraction [2; 1, 2, 1, 1, 4, 1, 1, 6, ...]: every third
    partial denominator (1-indexed positions 2, 5, 8, ...) is 2k, all others 1.
    Numerators satisfy n_i = a_i * n_{i-1} + n_{i-2}.
    """
    pre_numerator = 1  # numerator of the (i-2)-th convergent
    cur_numerator = 2  # numerator of the (i-1)-th convergent; first convergent is 2/1
    for i in range(2, max_n + 1):
        temp = pre_numerator
        # partial denominator a_i: 2*(i//3) when i is a multiple of 3, else 1
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return _sum_digits(cur_numerator)


if __name__ == "__main__":
    print(f"{UpperCamelCase() = }")
703
"""Lazy import structure for the Speech2Text model family (config, processor, tokenizer,
feature extractor, and TF/PyTorch model classes, each guarded by its optional dependency)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_tf_available,
    is_torch_available,
)


# Always-available submodules; optional ones are appended below when their backend is installed.
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules import only on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
683
0
from __future__ import annotations


def lowerCAmelCase_(numsa: list[float], numsb: list[float]) -> float:
    """Return the median of the union of two (possibly unsorted) numeric arrays.

    Args:
        numsa: first array of numbers.
        numsb: second array of numbers.

    Raises:
        ValueError: if both arrays are empty (no median exists).
    """
    # NOTE: the previous version concatenated the first array with itself,
    # silently ignoring the second array.
    all_numbers = sorted(numsa + numsb)
    if not all_numbers:
        raise ValueError("median of two empty arrays is undefined")
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        # Odd total count: middle element.
        return all_numbers[div]
    # Even total count: mean of the two middle elements.
    return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_a = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_b = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {lowerCAmelCase_(array_a, array_b)}")
21
"""simple docstring""" def _lowerCamelCase ( lowerCamelCase__ : Optional[Any] ): lowercase__ : List[str] = len(lowerCamelCase__ ) lowercase__ : Optional[int] = sum(lowerCamelCase__ ) lowercase__ : Optional[int] = [[False for x in range(s + 1 )] for y in range(n + 1 )] for i in range(1 , n + 1 ): lowercase__ : int = True for i in range(1 , s + 1 ): lowercase__ : int = False for i in range(1 , n + 1 ): for j in range(1 , s + 1 ): lowercase__ : Optional[Any] = dp[i][j - 1] if arr[i - 1] <= j: lowercase__ : int = dp[i][j] or dp[i - 1][j - arr[i - 1]] for j in range(int(s / 2 ) , -1 , -1 ): if dp[n][j] is True: lowercase__ : List[Any] = s - 2 * j break return diff
200
0
'''simple docstring''' import numpy as np from scipy.spatial.distance import cdist from sklearn.metrics import fa_score import datasets lowerCAmelCase_ : str = "\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n" lowerCAmelCase_ : List[str] = "\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n" lowerCAmelCase_ : Union[str, Any] = "\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for 'cvit-mkb-clsr' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"precision\": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 
1.0}\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'precision@10': 1.0}\n\n" def _lowerCamelCase (__lowerCamelCase : str , __lowerCamelCase : Dict ) -> List[str]: return float((preds == labels).mean() ) def _lowerCamelCase (__lowerCamelCase : Dict , __lowerCamelCase : Optional[int] ) -> List[str]: a__ = simple_accuracy(__lowerCamelCase , __lowerCamelCase ) a__ = float(fa_score(y_true=__lowerCamelCase , y_pred=__lowerCamelCase ) ) return { "accuracy": acc, "f1": fa, } def _lowerCamelCase (__lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict ) -> Optional[Any]: a__ = np.array(__lowerCamelCase ) a__ = np.array(__lowerCamelCase ) a__ = en_sentvecs.shape[0] # mean centering a__ = en_sentvecs - np.mean(__lowerCamelCase , axis=0 ) a__ = in_sentvecs - np.mean(__lowerCamelCase , axis=0 ) a__ = cdist(__lowerCamelCase , __lowerCamelCase , "cosine" ) a__ = np.array(range(__lowerCamelCase ) ) a__ = sim.argsort(axis=1 )[:, :10] a__ = np.any(preds == actual[:, None] , axis=1 ) return float(matches.mean() ) @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class UpperCamelCase__ ( datasets.Metric ): def __a ( self : str ): '''simple docstring''' if self.config_name not in [ "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", "cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", "wiki-ner", ]: raise KeyError( "You should supply a configuration name selected in " "[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", " "\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", " "\"wiki-ner\"]" ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": 
datasets.Value("int64" ) if self.config_name != "cvit-mkb-clsr" else datasets.Sequence(datasets.Value("float32" ) ), "references": datasets.Value("int64" ) if self.config_name != "cvit-mkb-clsr" else datasets.Sequence(datasets.Value("float32" ) ), } ) , codebase_urls=[] , reference_urls=[] , format="numpy" if self.config_name != "cvit-mkb-clsr" else None , ) def __a ( self : Tuple , lowerCamelCase : Dict , lowerCamelCase : Tuple ): '''simple docstring''' if self.config_name == "cvit-mkb-clsr": return {"precision@10": precision_at_aa(lowerCamelCase , lowerCamelCase )} elif self.config_name in ["wiki-ner"]: return acc_and_fa(lowerCamelCase , lowerCamelCase ) elif self.config_name in [ "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md", ]: return {"accuracy": simple_accuracy(lowerCamelCase , lowerCamelCase )} else: raise KeyError( "You should supply a configuration name selected in " "[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", " "\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", " "\"wiki-ner\"]" )
289
"""Convert a fairseq M2M100 checkpoint on disk to the Hugging Face MaMaaa format."""
import argparse

import torch
from torch import nn

from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration


def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys that have no HF equivalent (mutates in place)."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """Return a bias-free Linear layer that shares weights with `emb` (tied LM head)."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    """Load a fairseq checkpoint from `checkpoint_path` and return the converted HF model."""
    mam_aaa = torch.load(checkpoint_path, map_location="cpu")
    # Older checkpoints store hyperparameters under "args", newer ones under "cfg"/"model".
    args = mam_aaa["args"] or mam_aaa["cfg"]["model"]
    state_dict = mam_aaa["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = MaMaaaConfig(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )

    # Encoder and decoder share their token embedding in the HF model.
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MaMaaaForConditionalGeneration(config)
    # strict=False: the HF model has keys (e.g. the tied lm_head) the fairseq dict lacks.
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
289
1
"""Lazy import structure for the HerBERT tokenizers (fast variant only when
the `tokenizers` backend is installed)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available


# Base structure; the fast tokenizer entry is appended below, not assigned over it.
_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]


if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    # Replace this module with a lazy proxy so submodules import only on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
51
"""Verification helpers for downloaded files and dataset splits (checksums and sizes)."""
import enum
import os
from hashlib import sha256
from typing import Optional

from .. import config
from .logging import get_logger


logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    """How much verification to run after download/preparation."""

    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Base error for checksum verification failures."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """A downloaded file was not listed in the expected checksums."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some expected files were never downloaded."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """A downloaded file's checksum differs from the expected one."""


def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None) -> None:
    """Compare recorded checksums against expected ones, raising on any mismatch.

    Raises:
        ExpectedMoreDownloadedFiles: expected URLs missing from the recorded set.
        UnexpectedDownloadedFile: recorded URLs absent from the expected set.
        NonMatchingChecksumError: same URL, different checksum.
    """
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    """Base error for split verification failures."""


class UnexpectedSplits(SplitsVerificationException):
    """Recorded splits that were not expected."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Expected splits missing from the recorded set."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """A split's recorded example count differs from the expected one."""


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict) -> None:
    """Compare recorded split sizes against expected ones, raising on any mismatch."""
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Return the file size (and optionally the sha256 checksum) of the file at `path`."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            # Stream in 1 MiB chunks to keep memory flat on large files.
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size) -> bool:
    """Whether `dataset_size` (bytes) is under the configured in-memory threshold."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
0
"""Task template for automatic speech recognition datasets."""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class UpperCamelCase_(TaskTemplate):
    """Maps a dataset's audio/transcription columns onto the canonical ASR schema."""

    # `task` is serialized even when left at its default value.
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        """Return a copy of this template whose input schema uses the dataset's own Audio feature.

        Raises:
            ValueError: if the audio column is missing or is not an Audio feature.
        """
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        # The dataclass is frozen, so bypass __setattr__ via __dict__.
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Dataset-column -> schema-column mapping used to rename columns."""
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
702
from argparse import ArgumentParser from datasets.commands.convert import ConvertCommand from datasets.commands.dummy_data import DummyDataCommand from datasets.commands.env import EnvironmentCommand from datasets.commands.run_beam import RunBeamCommand from datasets.commands.test import TestCommand from datasets.utils.logging import set_verbosity_info def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Any ): return {key.lstrip("""-""" ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )} def SCREAMING_SNAKE_CASE__ ( ): SCREAMING_SNAKE_CASE__ = ArgumentParser( """HuggingFace Datasets CLI tool""" , usage="""datasets-cli <command> [<args>]""" , allow_abbrev=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = parser.add_subparsers(help="""datasets-cli command helpers""" ) set_verbosity_info() # Register commands ConvertCommand.register_subcommand(UpperCamelCase__ ) EnvironmentCommand.register_subcommand(UpperCamelCase__ ) TestCommand.register_subcommand(UpperCamelCase__ ) RunBeamCommand.register_subcommand(UpperCamelCase__ ) DummyDataCommand.register_subcommand(UpperCamelCase__ ) # Parse args SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = parser.parse_known_args() if not hasattr(UpperCamelCase__ , """func""" ): parser.print_help() exit(1 ) SCREAMING_SNAKE_CASE__ = parse_unknown_args(UpperCamelCase__ ) # Run SCREAMING_SNAKE_CASE__ = args.func(UpperCamelCase__ , **UpperCamelCase__ ) service.run() if __name__ == "__main__": main()
59
0
'''simple docstring''' import unittest from transformers.testing_utils import require_bsa from transformers.utils import is_bsa_available from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin if is_bsa_available(): from transformers import MarkupLMFeatureExtractor class A_ (unittest.TestCase ): """simple docstring""" def __init__( self :List[Any] , lowerCAmelCase__ :int ) -> Optional[int]: '''simple docstring''' snake_case_ : int = parent def _A ( self :Dict ) -> int: '''simple docstring''' return {} def __UpperCAmelCase ( )-> Optional[Any]: """simple docstring""" snake_case_ : List[str] = "<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR=\"FFFFFF\">\n <HR>\n <a href=\"http://google.com\">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style=\"color:#0000FF\">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>" snake_case_ : List[Any] = "\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n " return [html_string_a, html_string_a] @require_bsa class A_ (lowerCAmelCase_ , unittest.TestCase ): """simple docstring""" a__ = MarkupLMFeatureExtractor if is_bsa_available() else None def _A ( self :Dict ) -> List[Any]: '''simple docstring''' snake_case_ : Any = MarkupLMFeatureExtractionTester(self ) @property def _A ( self :int ) -> Dict: '''simple docstring''' return self.feature_extract_tester.prepare_feat_extract_dict() def _A ( self :Any ) -> Tuple: '''simple docstring''' snake_case_ : str = self.feature_extraction_class() # Test not batched input snake_case_ : int = get_html_strings()[0] snake_case_ : Any = feature_extractor(lowercase__ ) # fmt: off snake_case_ : Optional[Any] = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to 
JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]] snake_case_ : Optional[Any] = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]] # fmt: on self.assertEqual(encoding.nodes , lowercase__ ) self.assertEqual(encoding.xpaths , lowercase__ ) # Test batched snake_case_ : str = get_html_strings() snake_case_ : Tuple = feature_extractor(lowercase__ ) # fmt: off snake_case_ : Optional[Any] = expected_nodes + [["My First Heading", "My first paragraph."]] snake_case_ : Optional[int] = expected_xpaths + [["/html/body/h1", "/html/body/p"]] self.assertEqual(len(encoding.nodes ) , 2 ) self.assertEqual(len(encoding.xpaths ) , 2 ) self.assertEqual(encoding.nodes , lowercase__ ) self.assertEqual(encoding.xpaths , lowercase__ )
653
"""simple docstring""" import random def snake_case__ ( _lowerCamelCase, _lowerCamelCase, _lowerCamelCase = False ) ->dict: """simple docstring""" __lowercase : dict = {i: [] for i in range(_lowerCamelCase )} # if probability is greater or equal than 1, then generate a complete graph if probability >= 1: return complete_graph(_lowerCamelCase ) # if probability is lower or equal than 0, then return a graph without edges if probability <= 0: return graph # for each couple of nodes, add an edge from u to v # if the number randomly generated is greater than probability probability for i in range(_lowerCamelCase ): for j in range(i + 1, _lowerCamelCase ): if random.random() < probability: graph[i].append(_lowerCamelCase ) if not directed: # if the graph is undirected, add an edge in from j to i, either graph[j].append(_lowerCamelCase ) return graph def snake_case__ ( _lowerCamelCase ) ->dict: """simple docstring""" return { i: [j for j in range(_lowerCamelCase ) if i != j] for i in range(_lowerCamelCase ) } if __name__ == "__main__": import doctest doctest.testmod()
575
0
"""simple docstring""" def lowerCAmelCase__ ( lowerCamelCase__ , lowerCamelCase__ ) -> str: A = '' for word_or_phrase in separated: if not isinstance(lowerCamelCase__ , lowerCamelCase__ ): raise Exception('join() accepts only strings to be joined' ) joined += word_or_phrase + separator return joined.strip(lowerCamelCase__ ) if __name__ == "__main__": from doctest import testmod testmod()
702
"""simple docstring""" import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import ( AutoProcessor, BertTokenizerFast, BlipImageProcessor, GPTaTokenizer, InstructBlipProcessor, PreTrainedTokenizerFast, ) @require_vision class UpperCAmelCase__ ( unittest.TestCase ): def A_ ( self : Dict ) -> Dict: '''simple docstring''' A = tempfile.mkdtemp() A = BlipImageProcessor() A = GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model' ) A = BertTokenizerFast.from_pretrained('hf-internal-testing/tiny-random-bert' ) A = InstructBlipProcessor(snake_case , snake_case , snake_case ) processor.save_pretrained(self.tmpdirname ) def A_ ( self : List[str] , **snake_case : str ) -> Dict: '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case ).tokenizer def A_ ( self : int , **snake_case : Optional[Any] ) -> Any: '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case ).image_processor def A_ ( self : Any , **snake_case : Union[str, Any] ) -> Any: '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case ).qformer_tokenizer def A_ ( self : int ) -> Union[str, Any]: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def A_ ( self : List[Any] ) -> Tuple: '''simple docstring''' A = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] A = [Image.fromarray(np.moveaxis(snake_case , 0 , -1 ) ) for x in image_inputs] return image_inputs def A_ ( self : List[Any] ) -> Optional[int]: '''simple docstring''' A = InstructBlipProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , ) processor.save_pretrained(self.tmpdirname ) A = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) A = 
self.get_image_processor(do_normalize=snake_case , padding_value=1.0 ) A = InstructBlipProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=snake_case , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , snake_case ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , snake_case ) self.assertIsInstance(processor.qformer_tokenizer , snake_case ) def A_ ( self : Optional[Any] ) -> Dict: '''simple docstring''' A = self.get_image_processor() A = self.get_tokenizer() A = self.get_qformer_tokenizer() A = InstructBlipProcessor( tokenizer=snake_case , image_processor=snake_case , qformer_tokenizer=snake_case ) A = self.prepare_image_inputs() A = image_processor(snake_case , return_tensors='np' ) A = processor(images=snake_case , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def A_ ( self : Tuple ) -> List[str]: '''simple docstring''' A = self.get_image_processor() A = self.get_tokenizer() A = self.get_qformer_tokenizer() A = InstructBlipProcessor( tokenizer=snake_case , image_processor=snake_case , qformer_tokenizer=snake_case ) A = 'lower newer' A = processor(text=snake_case ) A = tokenizer(snake_case , return_token_type_ids=snake_case ) A = qformer_tokenizer(snake_case , return_token_type_ids=snake_case ) for key in encoded_tokens.keys(): self.assertListEqual(encoded_tokens[key] , encoded_processor[key] ) for key in encoded_tokens_qformer.keys(): self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['qformer_' + key] ) def A_ ( self : List[Any] ) -> Optional[int]: '''simple docstring''' A = self.get_image_processor() A = self.get_tokenizer() A = self.get_qformer_tokenizer() A = InstructBlipProcessor( 
tokenizer=snake_case , image_processor=snake_case , qformer_tokenizer=snake_case ) A = 'lower newer' A = self.prepare_image_inputs() A = processor(text=snake_case , images=snake_case ) self.assertListEqual( list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , ) # test if it raises when no input is passed with pytest.raises(snake_case ): processor() def A_ ( self : Optional[Any] ) -> List[str]: '''simple docstring''' A = self.get_image_processor() A = self.get_tokenizer() A = self.get_qformer_tokenizer() A = InstructBlipProcessor( tokenizer=snake_case , image_processor=snake_case , qformer_tokenizer=snake_case ) A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A = processor.batch_decode(snake_case ) A = tokenizer.batch_decode(snake_case ) self.assertListEqual(snake_case , snake_case ) def A_ ( self : Tuple ) -> List[Any]: '''simple docstring''' A = self.get_image_processor() A = self.get_tokenizer() A = self.get_qformer_tokenizer() A = InstructBlipProcessor( tokenizer=snake_case , image_processor=snake_case , qformer_tokenizer=snake_case ) A = 'lower newer' A = self.prepare_image_inputs() A = processor(text=snake_case , images=snake_case ) self.assertListEqual( list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
109
0
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { """microsoft/beit-base-patch16-224-pt22k""": ( """https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json""" ), # See all BEiT models at https://huggingface.co/models?filter=beit } class UpperCamelCase__ ( __a ): """simple docstring""" A__ : Union[str, Any] = "beit" def __init__( self , SCREAMING_SNAKE_CASE__=8192 , SCREAMING_SNAKE_CASE__=768 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=3072 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0_2 , SCREAMING_SNAKE_CASE__=1e-12 , SCREAMING_SNAKE_CASE__=224 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=[3, 5, 7, 11] , SCREAMING_SNAKE_CASE__=[1, 2, 3, 6] , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=0.4 , SCREAMING_SNAKE_CASE__=256 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=255 , **SCREAMING_SNAKE_CASE__ , ) -> int: super().__init__(**__lowerCAmelCase ) A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = initializer_range A__ = layer_norm_eps A__ = image_size A__ = patch_size A__ = num_channels A__ = use_mask_token A__ = use_absolute_position_embeddings A__ = use_relative_position_bias A__ = use_shared_relative_position_bias A__ = layer_scale_init_value A__ = drop_path_rate 
A__ = use_mean_pooling # decode head attributes (semantic segmentation) A__ = out_indices A__ = pool_scales # auxiliary head attributes (semantic segmentation) A__ = use_auxiliary_head A__ = auxiliary_loss_weight A__ = auxiliary_channels A__ = auxiliary_num_convs A__ = auxiliary_concat_input A__ = semantic_loss_ignore_index class UpperCamelCase__ ( __a ): """simple docstring""" A__ : str = version.parse("1.11" ) @property def snake_case__ ( self ) -> Tuple: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def snake_case__ ( self ) -> List[Any]: return 1e-4
104
'''simple docstring''' from __future__ import annotations def A_ ( _lowerCamelCase : int , _lowerCamelCase : int ): if b == 0: return (1, 0) ((_lowerCAmelCase) , (_lowerCAmelCase)) = extended_euclid(_lowerCamelCase , a % b ) _lowerCAmelCase = a // b return (y, x - k * y) def A_ ( _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int ): ((_lowerCAmelCase) , (_lowerCAmelCase)) = extended_euclid(_lowerCamelCase , _lowerCamelCase ) _lowerCAmelCase = na * na _lowerCAmelCase = ra * x * na + ra * y * na return (n % m + m) % m def A_ ( _lowerCamelCase : int , _lowerCamelCase : int ): ((_lowerCAmelCase) , (_lowerCAmelCase)) = extended_euclid(_lowerCamelCase , _lowerCamelCase ) if b < 0: _lowerCAmelCase = (b % n + n) % n return b def A_ ( _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int ): _lowerCAmelCase , _lowerCAmelCase = invert_modulo(_lowerCamelCase , _lowerCamelCase ), invert_modulo(_lowerCamelCase , _lowerCamelCase ) _lowerCAmelCase = na * na _lowerCAmelCase = ra * x * na + ra * y * na return (n % m + m) % m if __name__ == "__main__": from doctest import testmod testmod(name='''chinese_remainder_theorem''', verbose=True) testmod(name='''chinese_remainder_theorem2''', verbose=True) testmod(name='''invert_modulo''', verbose=True) testmod(name='''extended_euclid''', verbose=True)
309
0
'''simple docstring''' import qiskit def _A (lowerCAmelCase__ :int , lowerCAmelCase__ :int ) -> qiskit.result.counts.Counts: '''simple docstring''' _a = qiskit.Aer.get_backend('aer_simulator' ) # Create a Quantum Circuit acting on the q register _a = qiskit.QuantumCircuit(lowerCAmelCase__ , lowerCAmelCase__ ) # Apply X (NOT) Gate to Qubits 0 & 1 circuit.x(0 ) circuit.x(1 ) # Map the quantum measurement to the classical bits circuit.measure([0, 1] , [0, 1] ) # Execute the circuit on the qasm simulator _a = qiskit.execute(lowerCAmelCase__ , lowerCAmelCase__ , shots=10_00 ) # Return the histogram data of the results of the experiment. return job.result().get_counts(lowerCAmelCase__ ) if __name__ == "__main__": a_ : Union[str, Any] = single_qubit_measure(2, 2) print(f'''Total count for various states are: {counts}''')
532
'''simple docstring''' def _A (lowerCAmelCase__ :Union[str, Any] ) -> List[str]: '''simple docstring''' return [ { 0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3], 5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7], }, { 0: [6], 1: [9], 2: [4, 5], 3: [4], 4: [2, 3], 5: [2], 6: [0, 7], 7: [6], 8: [], 9: [1], }, { 0: [4], 1: [6], 2: [], 3: [5, 6, 7], 4: [0, 6], 5: [3, 8, 9], 6: [1, 3, 4, 7], 7: [3, 6, 8, 9], 8: [5, 7], 9: [5, 7], }, { 0: [1, 3], 1: [0, 2, 4], 2: [1, 3, 4], 3: [0, 2, 4], 4: [1, 2, 3], }, ][index] def _A (lowerCAmelCase__ :dict[int, list[int]] ) -> list[tuple[int, int]]: '''simple docstring''' _a = 0 _a = len(lowerCAmelCase__ ) # No of vertices in graph _a = [0] * n _a = [False] * n def dfs(lowerCAmelCase__ :Dict , lowerCAmelCase__ :str , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Union[str, Any] ): _a = True _a = id_ id_ += 1 for to in graph[at]: if to == parent: pass elif not visited[to]: dfs(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , id_ ) _a = min(low[at] , low[to] ) if id_ <= low[to]: bridges.append((at, to) if at < to else (to, at) ) else: # This edge is a back edge and cannot be a bridge _a = min(low[at] , low[to] ) _a = [] for i in range(lowerCAmelCase__ ): if not visited[i]: dfs(lowerCAmelCase__ , -1 , lowerCAmelCase__ , id_ ) return bridges if __name__ == "__main__": import doctest doctest.testmod()
532
1
"""simple docstring""" import argparse import json import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification def lowerCamelCase__ ( _lowerCamelCase : Optional[int] ) -> List[Any]: lowerCamelCase_ = SwinConfig() lowerCamelCase_ = swin_name.split('_' ) lowerCamelCase_ = name_split[1] lowerCamelCase_ = int(name_split[4] ) lowerCamelCase_ = int(name_split[3][-1] ) if model_size == "tiny": lowerCamelCase_ = 96 lowerCamelCase_ = (2, 2, 6, 2) lowerCamelCase_ = (3, 6, 12, 24) elif model_size == "small": lowerCamelCase_ = 96 lowerCamelCase_ = (2, 2, 18, 2) lowerCamelCase_ = (3, 6, 12, 24) elif model_size == "base": lowerCamelCase_ = 128 lowerCamelCase_ = (2, 2, 18, 2) lowerCamelCase_ = (4, 8, 16, 32) else: lowerCamelCase_ = 192 lowerCamelCase_ = (2, 2, 18, 2) lowerCamelCase_ = (6, 12, 24, 48) if "in22k" in swin_name: lowerCamelCase_ = 21841 else: lowerCamelCase_ = 1000 lowerCamelCase_ = 'huggingface/label-files' lowerCamelCase_ = 'imagenet-1k-id2label.json' lowerCamelCase_ = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type='dataset' ) , 'r' ) ) lowerCamelCase_ = {int(_lowerCamelCase ): v for k, v in idalabel.items()} lowerCamelCase_ = idalabel lowerCamelCase_ = {v: k for k, v in idalabel.items()} lowerCamelCase_ = img_size lowerCamelCase_ = num_classes lowerCamelCase_ = embed_dim lowerCamelCase_ = depths lowerCamelCase_ = num_heads lowerCamelCase_ = window_size return config def lowerCamelCase__ ( _lowerCamelCase : Dict ) -> Dict: if "patch_embed.proj" in name: lowerCamelCase_ = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: lowerCamelCase_ = name.replace('patch_embed.norm' , 'embeddings.norm' ) if "layers" in name: lowerCamelCase_ = 'encoder.' 
+ name if "attn.proj" in name: lowerCamelCase_ = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name: lowerCamelCase_ = name.replace('attn' , 'attention.self' ) if "norm1" in name: lowerCamelCase_ = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: lowerCamelCase_ = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: lowerCamelCase_ = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: lowerCamelCase_ = name.replace('mlp.fc2' , 'output.dense' ) if name == "norm.weight": lowerCamelCase_ = 'layernorm.weight' if name == "norm.bias": lowerCamelCase_ = 'layernorm.bias' if "head" in name: lowerCamelCase_ = name.replace('head' , 'classifier' ) else: lowerCamelCase_ = 'swin.' + name return name def lowerCamelCase__ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : List[str] ) -> Dict: for key in orig_state_dict.copy().keys(): lowerCamelCase_ = orig_state_dict.pop(_lowerCamelCase ) if "mask" in key: continue elif "qkv" in key: lowerCamelCase_ = key.split('.' 
) lowerCamelCase_ = int(key_split[1] ) lowerCamelCase_ = int(key_split[3] ) lowerCamelCase_ = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: lowerCamelCase_ = val[:dim, :] lowerCamelCase_ = val[ dim : dim * 2, : ] lowerCamelCase_ = val[-dim:, :] else: lowerCamelCase_ = val[ :dim ] lowerCamelCase_ = val[ dim : dim * 2 ] lowerCamelCase_ = val[ -dim: ] else: lowerCamelCase_ = val return orig_state_dict def lowerCamelCase__ ( _lowerCamelCase : Tuple , _lowerCamelCase : Optional[int] ) -> Optional[int]: lowerCamelCase_ = timm.create_model(_lowerCamelCase , pretrained=_lowerCamelCase ) timm_model.eval() lowerCamelCase_ = get_swin_config(_lowerCamelCase ) lowerCamelCase_ = SwinForImageClassification(_lowerCamelCase ) model.eval() lowerCamelCase_ = convert_state_dict(timm_model.state_dict() , _lowerCamelCase ) model.load_state_dict(_lowerCamelCase ) lowerCamelCase_ = 'http://images.cocodataset.org/val2017/000000039769.jpg' lowerCamelCase_ = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) ) lowerCamelCase_ = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ) lowerCamelCase_ = image_processor(images=_lowerCamelCase , return_tensors='pt' ) lowerCamelCase_ = timm_model(inputs['pixel_values'] ) lowerCamelCase_ = model(**_lowerCamelCase ).logits assert torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-3 ) print(F'''Saving model {swin_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(_lowerCamelCase ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--swin_name''', default='''swin_tiny_patch4_window7_224''', type=str, help='''Name of the Swin timm model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', 
default=None, type=str, help='''Path to the output PyTorch model directory.''' ) _SCREAMING_SNAKE_CASE : Dict = parser.parse_args() convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
549
"""simple docstring""" import functools from typing import Any def lowerCamelCase__ ( _lowerCamelCase : str , _lowerCamelCase : list[str] ) -> bool: # Validation if not isinstance(_lowerCamelCase , _lowerCamelCase ) or len(_lowerCamelCase ) == 0: raise ValueError('the string should be not empty string' ) if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not all( isinstance(_lowerCamelCase , _lowerCamelCase ) and len(_lowerCamelCase ) > 0 for item in words ): raise ValueError('the words should be a list of non-empty strings' ) # Build trie lowerCamelCase_ = {} lowerCamelCase_ = 'WORD_KEEPER' for word in words: lowerCamelCase_ = trie for c in word: if c not in trie_node: lowerCamelCase_ = {} lowerCamelCase_ = trie_node[c] lowerCamelCase_ = True lowerCamelCase_ = len(_lowerCamelCase ) # Dynamic programming method @functools.cache def is_breakable(_lowerCamelCase : int ) -> bool: if index == len_string: return True lowerCamelCase_ = trie for i in range(_lowerCamelCase , _lowerCamelCase ): lowerCamelCase_ = trie_node.get(string[i] , _lowerCamelCase ) if trie_node is None: return False if trie_node.get(_lowerCamelCase , _lowerCamelCase ) and is_breakable(i + 1 ): return True return False return is_breakable(0 ) if __name__ == "__main__": import doctest doctest.testmod()
549
1
def lowerCAmelCase__ ( a__ , a__ , a__ , a__ ) ->Optional[Any]: '''simple docstring''' if height >= 1: move_tower(height - 1 , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) move_disk(lowerCAmelCase_ , lowerCAmelCase_ ) move_tower(height - 1 , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) def lowerCAmelCase__ ( a__ , a__ ) ->List[str]: '''simple docstring''' print("moving disk from" , lowerCAmelCase_ , "to" , lowerCAmelCase_ ) def lowerCAmelCase__ ( ) ->Any: '''simple docstring''' _UpperCamelCase = int(input("Height of hanoi: " ).strip() ) move_tower(lowerCAmelCase_ , "A" , "B" , "C" ) if __name__ == "__main__": main()
703
import logging from transformers import PretrainedConfig lowerCamelCase__ = logging.getLogger(__name__) lowerCamelCase__ = { '''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''', } class _UpperCAmelCase ( lowerCAmelCase ): '''simple docstring''' __A = '''bertabs''' def __init__( self : List[str] , lowercase_ : int=30522 , lowercase_ : str=512 , lowercase_ : int=6 , lowercase_ : Optional[Any]=512 , lowercase_ : Optional[Any]=8 , lowercase_ : Optional[int]=512 , lowercase_ : Tuple=0.2 , lowercase_ : Union[str, Any]=6 , lowercase_ : List[Any]=768 , lowercase_ : List[str]=8 , lowercase_ : int=2048 , lowercase_ : Tuple=0.2 , **lowercase_ : str , ) -> Union[str, Any]: """simple docstring""" super().__init__(**lowercase_) _UpperCamelCase = vocab_size _UpperCamelCase = max_pos _UpperCamelCase = enc_layers _UpperCamelCase = enc_hidden_size _UpperCamelCase = enc_heads _UpperCamelCase = enc_ff_size _UpperCamelCase = enc_dropout _UpperCamelCase = dec_layers _UpperCamelCase = dec_hidden_size _UpperCamelCase = dec_heads _UpperCamelCase = dec_ff_size _UpperCamelCase = dec_dropout
82
0
'''simple docstring''' from ...utils import is_note_seq_available, is_transformers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .notes_encoder import SpectrogramNotesEncoder from .continous_encoder import SpectrogramContEncoder from .pipeline_spectrogram_diffusion import ( SpectrogramContEncoder, SpectrogramDiffusionPipeline, TaFilmDecoder, ) try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .midi_utils import MidiProcessor
546
import unittest

import torch

from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin


# Make all torch ops deterministic so the hard-coded value checks below are stable.
enable_full_determinism()


class A_ ( __a , __a , unittest.TestCase ):
    """Model tests for ``VQModel`` (common mixin checks plus regression tests)."""

    # Model class under test and the name of its main input tensor.
    _A :List[Any] = VQModel
    _A :Any = '''sample'''

    @property
    def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , snake_case__ : int=(32, 32) ):
        # Build a dummy input batch for the model.
        # NOTE(review): the obfuscated source binds every local to
        # ``lowercase`` and then reads ``batch_size``/``num_channels``/
        # ``sizes``/``image``, which are undefined here — confirm against the
        # upstream test file before running.
        lowercase = 4
        lowercase = 3
        lowercase = floats_tensor((batch_size, num_channels) + sizes ).to(snake_case__ )
        return {"sample": image}

    @property
    def SCREAMING_SNAKE_CASE__ ( self : str ):
        # Expected input shape (channels, height, width).
        return (3, 32, 32)

    @property
    def SCREAMING_SNAKE_CASE__ ( self : Any ):
        # Expected output shape (channels, height, width).
        return (3, 32, 32)

    def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
        # Minimal two-block VQModel config plus matching dummy inputs.
        lowercase = {
            """block_out_channels""": [32, 64],
            """in_channels""": 3,
            """out_channels""": 3,
            """down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
            """up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
            """latent_channels""": 3,
        }
        lowercase = self.dummy_input
        return init_dict, inputs_dict

    def SCREAMING_SNAKE_CASE__ ( self : int ):
        # Intentionally skipped common test for VQModel.
        pass

    def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
        # Intentionally skipped common test for VQModel.
        pass

    def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
        # Load a tiny pretrained checkpoint, verify no weights are missing,
        # then run a single forward pass.
        lowercase , lowercase = VQModel.from_pretrained("""fusing/vqgan-dummy""" , output_loading_info=snake_case__ )
        self.assertIsNotNone(snake_case__ )
        self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
        model.to(snake_case__ )
        lowercase = model(**self.dummy_input )

        assert image is not None, "Make sure output is not None"

    def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
        # Regression check: forward a seeded random batch and compare a slice
        # of the output against hard-coded reference values.
        lowercase = VQModel.from_pretrained("""fusing/vqgan-dummy""" )
        model.to(snake_case__ ).eval()

        torch.manual_seed(0 )
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0 )

        lowercase = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
        lowercase = image.to(snake_case__ )
        with torch.no_grad():
            lowercase = model(snake_case__ ).sample

        lowercase = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        lowercase = torch.tensor([-0.0_153, -0.4_044, -0.1_880, -0.5_161, -0.2_418, -0.4_072, -0.1_612, -0.0_633, -0.0_143] )
        # fmt: on
        self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-3 ) )
428
0
"""simple docstring""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) lowerCAmelCase : str = _symbol_database.Default() lowerCAmelCase : Dict = _descriptor_pool.Default().AddSerializedFile( b"""\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 
\x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 
\x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03""" ) lowerCAmelCase : Union[str, Any] = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, """sentencepiece_model_pb2""", _globals) if _descriptor._USE_C_DESCRIPTORS is False: lowerCAmelCase : Union[str, Any] = None lowerCAmelCase : Optional[Any] = b"""H\003""" # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" lowerCAmelCase : Any = 45 lowerCAmelCase : Any = 1581 lowerCAmelCase : Union[str, Any] = 1517 lowerCAmelCase : List[str] = 1570 lowerCAmelCase : List[str] = 1584 lowerCAmelCase : Tuple = 1793 lowerCAmelCase : Optional[int] = 1795 lowerCAmelCase : Optional[Any] = 1916 lowerCAmelCase : int = 1864 lowerCAmelCase : List[str] = 1905 lowerCAmelCase : List[str] = 1919 lowerCAmelCase : Union[str, Any] = 2429 
lowerCAmelCase : int = 2208 lowerCAmelCase : List[Any] = 2418 lowerCAmelCase : List[str] = 2323 lowerCAmelCase : Any = 2407 # @@protoc_insertion_point(module_scope)
533
"""simple docstring""" from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal lowerCAmelCase : List[Any] = logging.get_logger(__name__) lowerCAmelCase : int = TypeVar("""DatasetType""", Dataset, IterableDataset) def a__ ( snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = "first_exhausted" , ) -> DatasetType: from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError("""Unable to interleave an empty list of datasets.""" ) for i, dataset in enumerate(snake_case__ ): if not isinstance(snake_case__ , (Dataset, IterableDataset) ): if isinstance(snake_case__ , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} ' """is an empty dataset dictionary.""" ) raise ValueError( F'Dataset at position {i} has at least one split: {list(snake_case__ )}\n' F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(snake_case__ ) )}\']' ) raise ValueError( F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(snake_case__ ).__name__}.' ) if i == 0: lowerCamelCase , lowerCamelCase = ( (Dataset, IterableDataset) if isinstance(snake_case__ , snake_case__ ) else (IterableDataset, Dataset) ) elif not isinstance(snake_case__ , snake_case__ ): raise ValueError( F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). 
Expected a list of Dataset objects or a list of IterableDataset objects.' ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(F'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' ) if dataset_type is Dataset: return _interleave_map_style_datasets( snake_case__ , snake_case__ , snake_case__ , info=snake_case__ , split=snake_case__ , stopping_strategy=snake_case__ ) else: return _interleave_iterable_datasets( snake_case__ , snake_case__ , snake_case__ , info=snake_case__ , split=snake_case__ , stopping_strategy=snake_case__ ) def a__ ( snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = 0 , ) -> DatasetType: if not dsets: raise ValueError("""Unable to concatenate an empty list of datasets.""" ) for i, dataset in enumerate(snake_case__ ): if not isinstance(snake_case__ , (Dataset, IterableDataset) ): if isinstance(snake_case__ , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} ' """is an empty dataset dictionary.""" ) raise ValueError( F'Dataset at position {i} has at least one split: {list(snake_case__ )}\n' F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(snake_case__ ) )}\']' ) raise ValueError( F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(snake_case__ ).__name__}.' ) if i == 0: lowerCamelCase , lowerCamelCase = ( (Dataset, IterableDataset) if isinstance(snake_case__ , snake_case__ ) else (IterableDataset, Dataset) ) elif not isinstance(snake_case__ , snake_case__ ): raise ValueError( F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' 
) if dataset_type is Dataset: return _concatenate_map_style_datasets(snake_case__ , info=snake_case__ , split=snake_case__ , axis=snake_case__ ) else: return _concatenate_iterable_datasets(snake_case__ , info=snake_case__ , split=snake_case__ , axis=snake_case__ )
533
1
import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class __lowerCAmelCase : """simple docstring""" def __init__( self : Optional[Any] , _snake_case : Union[str, Any] , _snake_case : Optional[int]=99 , _snake_case : List[Any]=13 , _snake_case : Any=16 , _snake_case : Any=7 , _snake_case : int=True , _snake_case : str=True , _snake_case : List[str]=True , _snake_case : Tuple=False , _snake_case : str=True , _snake_case : Union[str, Any]=2 , _snake_case : Union[str, Any]=32 , _snake_case : Tuple=4 , _snake_case : str=4 , _snake_case : Any=30 , _snake_case : Union[str, Any]=0 , _snake_case : List[str]=1 , _snake_case : Union[str, Any]=2 , _snake_case : List[Any]=None , ): __lowercase : str = parent __lowercase : List[Any] = batch_size __lowercase : List[str] = decoder_seq_length # For common tests __lowercase : Tuple = self.decoder_seq_length __lowercase : str = is_training __lowercase : Tuple = use_attention_mask __lowercase : Any = use_labels __lowercase : Dict = vocab_size __lowercase : List[str] = d_model __lowercase : Any = d_model __lowercase : List[str] = decoder_layers __lowercase : List[str] = decoder_layers __lowercase : int = decoder_ffn_dim __lowercase : Union[str, Any] = decoder_attention_heads __lowercase : Optional[int] = decoder_attention_heads __lowercase : List[str] = eos_token_id __lowercase : Union[str, Any] = bos_token_id __lowercase : Optional[int] = pad_token_id __lowercase : Tuple = decoder_start_token_id __lowercase : Dict = use_cache __lowercase : Dict = max_position_embeddings __lowercase : 
Tuple = None __lowercase : Optional[Any] = decoder_seq_length __lowercase : Union[str, Any] = 2 __lowercase : int = 1 def snake_case_ ( self : Union[str, Any] ): __lowercase : Tuple = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) __lowercase : Tuple = None if self.use_attention_mask: __lowercase : int = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 ) __lowercase : Dict = None if self.use_labels: __lowercase : int = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) __lowercase : List[str] = TrOCRConfig( vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , ) return (config, input_ids, attention_mask, lm_labels) def snake_case_ ( self : Dict , _snake_case : Union[str, Any] , _snake_case : List[str] , _snake_case : int , _snake_case : str , ): __lowercase : str = True __lowercase : Union[str, Any] = TrOCRDecoder(config=_snake_case ).to(_snake_case ).eval() __lowercase : Union[str, Any] = input_ids[:2] input_ids[input_ids == 0] += 1 # first forward pass __lowercase : str = model(_snake_case , use_cache=_snake_case ) __lowercase : str = model(_snake_case ) __lowercase : int = model(_snake_case , use_cache=_snake_case ) self.parent.assertTrue(len(_snake_case ) == len(_snake_case ) ) self.parent.assertTrue(len(_snake_case ) == len(_snake_case ) + 1 ) __lowercase : List[str] = outputs['''past_key_values'''] # create hypothetical next token and extent to next_input_ids __lowercase : Any = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1 # append to next input_ids and __lowercase : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) 
__lowercase : Optional[int] = model(_snake_case )['''last_hidden_state'''] __lowercase : str = model(_snake_case , past_key_values=_snake_case )['''last_hidden_state'''] # select random slice __lowercase : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item() __lowercase : List[str] = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() __lowercase : Optional[Any] = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(_snake_case , _snake_case , atol=1E-3 ) def snake_case_ ( self : List[str] ): __lowercase : str = self.prepare_config_and_inputs() __lowercase , __lowercase , __lowercase , __lowercase : str = config_and_inputs __lowercase : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': attention_mask} return config, inputs_dict @require_torch class __lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): """simple docstring""" A__ : List[Any] = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () A__ : Union[str, Any] = (TrOCRForCausalLM,) if is_torch_available() else () A__ : List[Any] = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {} A__ : Tuple = True A__ : List[str] = False def snake_case_ ( self : Tuple ): __lowercase : List[str] = TrOCRStandaloneDecoderModelTester(self , is_training=_snake_case ) __lowercase : Union[str, Any] = ConfigTester(self , config_class=_snake_case ) def snake_case_ ( self : List[str] ): pass def snake_case_ ( self : int ): pass def snake_case_ ( self : str ): pass def snake_case_ ( self : List[str] ): self.config_tester.run_common_tests() def snake_case_ ( self : Union[str, Any] ): __lowercase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*_snake_case ) def snake_case_ ( self : Any ): return @unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be 
worth fixing :) def snake_case_ ( self : List[Any] ): pass
509
from torch import nn


class __lowerCAmelCase(nn.Module):
    """Single-layer classification head.

    Maps a hidden state of size ``embed_size`` to ``class_size`` logits with
    one linear layer.

    FIX(review): the previous version declared both ``__init__`` parameters
    as the same name ``_snake_case`` (a SyntaxError — duplicate argument
    names), never stored the sizes or the layer on ``self``, and returned an
    undefined ``logits`` name. Parameter names are restored from the names
    the original body read (``class_size``, ``embed_size``, ``self.mlp``).
    """

    def __init__(self, class_size, embed_size):
        super().__init__()
        # Kept on the instance for introspection by callers.
        self.class_size = class_size
        self.embed_size = embed_size
        self.mlp = nn.Linear(embed_size, class_size)

    def snake_case_(self, hidden_state):
        """Return ``(..., class_size)`` logits for ``hidden_state`` of size ``(..., embed_size)``."""
        logits = self.mlp(hidden_state)
        return logits
509
1
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase__ = logging.get_logger(__name__) def _SCREAMING_SNAKE_CASE( snake_case_ : List[str] ) ->int: '''simple docstring''' _lowercase : List[Any] = OrderedDict() for key, value in state_dict.items(): if key.startswith('''module.encoder''' ): _lowercase : Optional[Any] = key.replace('''module.encoder''' , '''glpn.encoder''' ) if key.startswith('''module.decoder''' ): _lowercase : Dict = key.replace('''module.decoder''' , '''decoder.stages''' ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 _lowercase : Any = key[key.find('''patch_embed''' ) + len('''patch_embed''' )] _lowercase : Dict = key.replace(F"patch_embed{idx}" , F"patch_embeddings.{int(snake_case_ )-1}" ) if "norm" in key: _lowercase : List[Any] = key.replace('''norm''' , '''layer_norm''' ) if "glpn.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 _lowercase : Dict = key[key.find('''glpn.encoder.layer_norm''' ) + len('''glpn.encoder.layer_norm''' )] _lowercase : Optional[Any] = key.replace(F"layer_norm{idx}" , F"layer_norm.{int(snake_case_ )-1}" ) if "layer_norm1" in key: _lowercase : Dict = key.replace('''layer_norm1''' , '''layer_norm_1''' ) if "layer_norm2" in key: _lowercase : Optional[int] = key.replace('''layer_norm2''' , '''layer_norm_2''' ) if "block" in key: # replace for example block1 by block.0 _lowercase : str = key[key.find('''block''' ) + len('''block''' )] _lowercase : Optional[int] = key.replace(F"block{idx}" , F"block.{int(snake_case_ )-1}" ) if "attn.q" in key: _lowercase : Optional[int] = key.replace('''attn.q''' , '''attention.self.query''' ) if "attn.proj" in key: _lowercase : Dict = key.replace('''attn.proj''' , 
'''attention.output.dense''' ) if "attn" in key: _lowercase : str = key.replace('''attn''' , '''attention.self''' ) if "fc1" in key: _lowercase : Union[str, Any] = key.replace('''fc1''' , '''dense1''' ) if "fc2" in key: _lowercase : List[str] = key.replace('''fc2''' , '''dense2''' ) if "linear_pred" in key: _lowercase : int = key.replace('''linear_pred''' , '''classifier''' ) if "linear_fuse" in key: _lowercase : List[str] = key.replace('''linear_fuse.conv''' , '''linear_fuse''' ) _lowercase : Tuple = key.replace('''linear_fuse.bn''' , '''batch_norm''' ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 _lowercase : Optional[Any] = key[key.find('''linear_c''' ) + len('''linear_c''' )] _lowercase : int = key.replace(F"linear_c{idx}" , F"linear_c.{int(snake_case_ )-1}" ) if "bot_conv" in key: _lowercase : int = key.replace('''bot_conv''' , '''0.convolution''' ) if "skip_conv1" in key: _lowercase : Optional[Any] = key.replace('''skip_conv1''' , '''1.convolution''' ) if "skip_conv2" in key: _lowercase : List[Any] = key.replace('''skip_conv2''' , '''2.convolution''' ) if "fusion1" in key: _lowercase : List[str] = key.replace('''fusion1''' , '''1.fusion''' ) if "fusion2" in key: _lowercase : Dict = key.replace('''fusion2''' , '''2.fusion''' ) if "fusion3" in key: _lowercase : int = key.replace('''fusion3''' , '''3.fusion''' ) if "fusion" in key and "conv" in key: _lowercase : Union[str, Any] = key.replace('''conv''' , '''convolutional_layer''' ) if key.startswith('''module.last_layer_depth''' ): _lowercase : Tuple = key.replace('''module.last_layer_depth''' , '''head.head''' ) _lowercase : List[str] = value return new_state_dict def _SCREAMING_SNAKE_CASE( snake_case_ : int , snake_case_ : Union[str, Any] ) ->Tuple: '''simple docstring''' # for each of the encoder blocks: for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) 
_lowercase : Optional[Any] = state_dict.pop(F"glpn.encoder.block.{i}.{j}.attention.self.kv.weight" ) _lowercase : Tuple = state_dict.pop(F"glpn.encoder.block.{i}.{j}.attention.self.kv.bias" ) # next, add keys and values (in that order) to the state dict _lowercase : List[str] = kv_weight[ : config.hidden_sizes[i], : ] _lowercase : Any = kv_bias[: config.hidden_sizes[i]] _lowercase : Any = kv_weight[ config.hidden_sizes[i] :, : ] _lowercase : Any = kv_bias[config.hidden_sizes[i] :] def _SCREAMING_SNAKE_CASE( ) ->Dict: '''simple docstring''' _lowercase : Any = '''http://images.cocodataset.org/val2017/000000039769.jpg''' _lowercase : List[str] = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ) return image @torch.no_grad() def _SCREAMING_SNAKE_CASE( snake_case_ : int , snake_case_ : List[Any] , snake_case_ : Any=False , snake_case_ : Optional[int]=None ) ->str: '''simple docstring''' _lowercase : Dict = GLPNConfig(hidden_sizes=[64, 1_28, 3_20, 5_12] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] ) # load image processor (only resize + rescale) _lowercase : List[str] = GLPNImageProcessor() # prepare image _lowercase : int = prepare_img() _lowercase : Any = image_processor(images=snake_case_ , return_tensors='''pt''' ).pixel_values logger.info('''Converting model...''' ) # load original state dict _lowercase : str = torch.load(snake_case_ , map_location=torch.device('''cpu''' ) ) # rename keys _lowercase : str = rename_keys(snake_case_ ) # key and value matrices need special treatment read_in_k_v(snake_case_ , snake_case_ ) # create HuggingFace model and load state dict _lowercase : Tuple = GLPNForDepthEstimation(snake_case_ ) model.load_state_dict(snake_case_ ) model.eval() # forward pass _lowercase : str = model(snake_case_ ) _lowercase : str = outputs.predicted_depth # verify output if model_name is not None: if "nyu" in model_name: _lowercase : List[str] = torch.tensor( [[4.4_1_4_7, 4.0_8_7_3, 4.0_6_7_3], [3.7_8_9_0, 3.2_8_8_1, 3.1_5_2_5], 
[3.7_6_7_4, 3.5_4_2_3, 3.4_9_1_3]] ) elif "kitti" in model_name: _lowercase : Optional[Any] = torch.tensor( [[3.4_2_9_1, 2.7_8_6_5, 2.5_1_5_1], [3.2_8_4_1, 2.7_0_2_1, 2.3_5_0_2], [3.1_1_4_7, 2.4_6_2_5, 2.2_4_8_1]] ) else: raise ValueError(F"Unknown model name: {model_name}" ) _lowercase : int = torch.Size([1, 4_80, 6_40] ) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3] , snake_case_ , atol=1E-4 ) print('''Looks ok!''' ) # finally, push to hub if required if push_to_hub: logger.info('''Pushing model and image processor to the hub...''' ) model.push_to_hub( repo_path_or_name=Path(snake_case_ , snake_case_ ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=snake_case_ , ) image_processor.push_to_hub( repo_path_or_name=Path(snake_case_ , snake_case_ ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=snake_case_ , ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.' ) parser.add_argument( '--model_name', default='glpn-kitti', type=str, help='Name of the model in case you\'re pushing to the hub.', ) lowerCamelCase__ = parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
411
'''Segmented Sieve of Eratosthenes.'''
import math


def _SCREAMING_SNAKE_CASE(n: int) -> list[int]:
    """Return all prime numbers <= ``n`` using a segmented sieve.

    A classical sieve is run on the base segment ``[2, sqrt(n)]``; its primes
    are then used to cross out composites in successive windows of width
    ``sqrt(n)``, keeping memory at O(sqrt(n)) instead of O(n).

    FIX(review): the previous body bound every local to the same throwaway
    name and then read undefined names (``in_prime``, ``temp``, ``start``,
    ...), so it raised NameError on any call; it also crashed for small ``n``
    with no early return. Reconstructed from the algorithm's visible shape.

    :param n: upper bound (inclusive); values < 2 yield an empty list.
    :return: primes in ascending order.
    """
    if n < 2:
        return []

    primes: list[int] = []
    end = int(math.sqrt(n))

    # Base segment: classical sieve over [2, sqrt(n)].
    is_prime = [True] * (end + 1)
    base_primes: list[int] = []
    for candidate in range(2, end + 1):
        if is_prime[candidate]:
            base_primes.append(candidate)
            # Start at candidate**2: smaller multiples were already crossed out.
            for multiple in range(candidate * candidate, end + 1, candidate):
                is_prime[multiple] = False
    primes.extend(base_primes)

    # Remaining range: sieve window [low, high] with the base primes.
    low = end + 1
    high = min(2 * end, n)
    while low <= n:
        segment = [True] * (high - low + 1)
        for p in base_primes:
            # First multiple of p inside the window.
            first = (low // p) * p
            if first < low:
                first += p
            for multiple in range(first, high + 1, p):
                segment[multiple - low] = False
        for offset, flag in enumerate(segment):
            if flag:
                primes.append(low + offset)
        low = high + 1
        high = min(high + end, n)
    return primes


# Backwards-compatible public name; the demo below (and prior callers) use it.
sieve = _SCREAMING_SNAKE_CASE

if __name__ == "__main__":
    # Demo guarded so importing this module no longer sieves a million numbers.
    print(sieve(10**6))
411
1
"""Convert MusicGen checkpoints from the original Audiocraft repository into
the Hugging Face Transformers format.

The Audiocraft decoder stores fused QKV projections and repo-specific module
names; this script renames the weights, splits the fused projections, and
wires the decoder together with the T5 text encoder and EnCodec audio
encoder into a `MusicgenForConditionalGeneration`.
"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple

import torch
from audiocraft.models import MusicGen
from transformers import (
    AutoFeatureExtractor,
    AutoTokenizer,
    EncodecModel,
    MusicgenDecoderConfig,
    MusicgenForConditionalGeneration,
    MusicgenProcessor,
    TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Positional embeddings are recomputed on the fly, so they are expected to be
# missing from the converted decoder state dict.
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]


def rename_keys(name):
    """Map one Audiocraft parameter name onto its Transformers equivalent."""
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name


def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    """Rename all keys in-place and separate the encoder/decoder projection.

    The fused ``in_proj_weight`` tensors are split into distinct query, key
    and value projections. The ``enc_to_dec_proj`` weights are returned in a
    second dict because they belong to the composite model, not the decoder.

    Args:
        state_dict: The raw Audiocraft LM state dict.
        hidden_size: Decoder hidden size, used to split the fused QKV weight.

    Returns:
        ``(decoder_state_dict, enc_dec_proj_state_dict)``.
    """
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict


def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    """Build the decoder config matching a named checkpoint size."""
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config


@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    """Convert one MusicGen checkpoint, sanity-check it, and optionally save/push.

    Args:
        checkpoint: One of ``small`` / ``medium`` / ``large``.
        pytorch_dump_folder: Where to save the converted model, if given.
        repo_id: Hub repo to push the converted model to, if given.
        device: Torch device used to load the original checkpoint.
    """
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = TaEncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(
        text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder
    )

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint",
        default="small",
        type=str,
        help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
    )
    parser.add_argument(
        "--pytorch_dump_folder",
        required=True,
        default=None,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    parser.add_argument(
        "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
    )

    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
351
"""UnCLIP-style text projection: combines CLIP image and prompt embeddings
into the conditioning inputs (time embeddings and extra context tokens)
consumed by the decoder."""
import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class lowerCAmelCase__(ModelMixin, ConfigMixin):
    """
    Utility module for CLIP embeddings. Projects the image and prompt
    embeddings into additive time embeddings and extra cross-attention
    context tokens usable by the decoder.
    """

    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()

        # learned embedding substituted for the image embedding in the
        # unconditional half of a classifier-free-guidance batch
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        """Project CLIP embeddings into decoder conditioning signals.

        Args:
            image_embeddings: CLIP image embeddings, one per sample.
            prompt_embeds: pooled CLIP text embeddings.
            text_encoder_hidden_states: per-token text encoder outputs.
            do_classifier_free_guidance: when True, a learned unconditional
                embedding is prepended along the batch dimension.

        Returns:
            ``(text_encoder_hidden_states, additive_clip_time_embeddings)``.
        """
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
503
0
# Author: OMKAR PATHAK, Nwachukwu Chidiebere

# Use a Python dictionary to construct the graph.
from __future__ import annotations

from pprint import pformat
from typing import Generic, TypeVar

T = TypeVar("T")


class A__(Generic[T]):
    """Adjacency-list graph over hashable vertices of type ``T``.

    Directed by default; pass ``directed=False`` to mirror every edge in
    both vertex lists.
    """

    def __init__(self, directed: bool = True) -> None:
        # vertex -> list of adjacent vertices
        self.adj_list: dict[T, list[T]] = {}
        self.directed = directed

    def __UpperCamelCase(self, source_vertex: T, destination_vertex: T) -> A__[T]:
        """Add an edge between ``source_vertex`` and ``destination_vertex``.

        Missing vertices are created on the fly; returns ``self`` so calls
        can be chained.
        """
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices, then create a new
            # vertex with destination vertex as key and a list containing the source
            # vertex as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present, mirror the case above.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if neither vertex is present, create both entries pointing at each
            # other.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both vertices are present, only the source list gains an entry.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only the source is present, append the destination and create an
            # empty entry for it.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only the destination is present, create the source entry pointing
            # at it.
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if neither is present, create the source entry pointing at the
            # destination and an empty destination entry.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []

        return self

    def __repr__(self) -> str:
        """Pretty-printed adjacency list."""
        return pformat(self.adj_list)
711
from typing import Dict, List, Optional, Union

import numpy as np

from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_valid_image,
    to_numpy_array,
    valid_images,
)
from ...utils import logging


if is_vision_available():
    import PIL

snake_case_ = logging.get_logger(__name__)


def make_batched(videos):
    """Coerce ``videos`` into a batch: a list of videos, each a list of frames.

    Accepts a single frame, a single video (list of frames), or an
    already-batched list of videos; raises for anything else.
    """
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        # already a batch of videos
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        # a single video: wrap it in a batch of one
        return [videos]

    elif is_valid_image(videos):
        # a single frame: wrap it in a one-frame video inside a batch of one
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")


class A__(BaseImageProcessor):
    r"""
    Video image processor. Optionally resizes, center-crops, rescales
    (optionally offsetting pixel values around zero) and normalizes every
    frame of every input video.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize one frame.

        ``size`` is either ``{"shortest_edge": s}`` (aspect ratio preserved)
        or an explicit ``{"height": h, "width": w}``.
        """
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop one frame to ``size["height"] x size["width"]``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Rescale one frame by ``scale``, optionally shifting pixel values
        so the output is centred around zero rather than starting at it."""
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize one frame channel-wise with the given mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        """Apply the configured transform pipeline to a single frame."""
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Preprocess one or more videos into model-ready pixel values.

        Any argument left as ``None`` falls back to the value configured at
        construction time.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
191
0
"""Tests for the BERT slow and fast tokenizers, the BasicTokenizer and the
WordpieceTokenizer building blocks."""
import os
import unittest

from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
    VOCAB_FILES_NAMES,
    BasicTokenizer,
    BertTokenizer,
    WordpieceTokenizer,
    _is_control,
    _is_punctuation,
    _is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english


@require_tokenizers
class UpperCAmelCase_(TokenizerTesterMixin, unittest.TestCase):
    """End-to-end tokenization tests for BERT (slow + Rust tokenizers)."""

    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        # lower-casing strips accents by default
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_basic_tokenizer_splits_on_punctuation(self):
        tokenizer = BasicTokenizer()
        text = "a\n'll !!to?'d of, can't."
        expected = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
        self.assertListEqual(tokenizer.tokenize(text), expected)

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("bert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_a + [102]

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
374
"""Minimal, fully working Accelerate example: BERT-base on GLUE MRPC with
gradient accumulation. Runs unchanged on single CPU/GPU, multi-GPU, TPU,
and fp16/fp32 (see the Accelerate examples README)."""
import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator, batch_size=16):
    """Build the MRPC train/eval dataloaders.

    Args:
        accelerator: the `Accelerator`; used to serialize preprocessing
            (main process first) and to pick padding strategy per backend.
        batch_size: per-device training batch size.

    Returns:
        (train_dataloader, eval_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only: replace the real dataloaders with light-weight mocks.
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    """Train and evaluate BERT on MRPC using gradient accumulation.

    Args:
        config: dict with "lr", "num_epochs", "seed", "batch_size".
        args: parsed CLI namespace (mixed_precision, gradient_accumulation_steps, cpu).
    """
    # For testing only: shorten the run.
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2

    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)

    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    """Parse CLI arguments and launch the training function."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
374
1
"""Lazy import module for XLM-RoBERTa: heavy frameworks (torch/tf/flax) are
only imported when one of their public names is actually accessed."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Maps submodule name -> public names it defines; consumed by _LazyModule below.
_import_structure = {
    "configuration_xlm_roberta": [
        "XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XLMRobertaConfig",
        "XLMRobertaOnnxConfig",
    ],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm_roberta"] = [
        "XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMRobertaForCausalLM",
        "XLMRobertaForMaskedLM",
        "XLMRobertaForMultipleChoice",
        "XLMRobertaForQuestionAnswering",
        "XLMRobertaForSequenceClassification",
        "XLMRobertaForTokenClassification",
        "XLMRobertaModel",
        "XLMRobertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
        "TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMRobertaForCausalLM",
        "TFXLMRobertaForMaskedLM",
        "TFXLMRobertaForMultipleChoice",
        "TFXLMRobertaForQuestionAnswering",
        "TFXLMRobertaForSequenceClassification",
        "TFXLMRobertaForTokenClassification",
        "TFXLMRobertaModel",
        "TFXLMRobertaPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
        "FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxXLMRobertaForMaskedLM",
        "FlaxXLMRobertaForCausalLM",
        "FlaxXLMRobertaForMultipleChoice",
        "FlaxXLMRobertaForQuestionAnswering",
        "FlaxXLMRobertaForSequenceClassification",
        "FlaxXLMRobertaForTokenClassification",
        "FlaxXLMRobertaModel",
        "FlaxXLMRobertaPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Direct imports so static type checkers see the real symbols.
    from .configuration_xlm_roberta import (
        XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XLMRobertaConfig,
        XLMRobertaOnnxConfig,
    )

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlm_roberta import XLMRobertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta import (
            XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMRobertaForCausalLM,
            XLMRobertaForMaskedLM,
            XLMRobertaForMultipleChoice,
            XLMRobertaForQuestionAnswering,
            XLMRobertaForSequenceClassification,
            XLMRobertaForTokenClassification,
            XLMRobertaModel,
            XLMRobertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm_roberta import (
            TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMRobertaForCausalLM,
            TFXLMRobertaForMaskedLM,
            TFXLMRobertaForMultipleChoice,
            TFXLMRobertaForQuestionAnswering,
            TFXLMRobertaForSequenceClassification,
            TFXLMRobertaForTokenClassification,
            TFXLMRobertaModel,
            TFXLMRobertaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xlm_roberta import (
            FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxXLMRobertaForCausalLM,
            FlaxXLMRobertaForMaskedLM,
            FlaxXLMRobertaForMultipleChoice,
            FlaxXLMRobertaForQuestionAnswering,
            FlaxXLMRobertaForSequenceClassification,
            FlaxXLMRobertaForTokenClassification,
            FlaxXLMRobertaModel,
            FlaxXLMRobertaPreTrainedModel,
        )
else:
    import sys

    # Install the lazy module in place of this one so attribute access
    # triggers the deferred imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
718
"""Unit tests for the Flax BERT models (tester + shared-mixin test class)."""
import unittest

import numpy as np

from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_flax_available():
    from transformers.models.bert.modeling_flax_bert import (
        FlaxBertForMaskedLM,
        FlaxBertForMultipleChoice,
        FlaxBertForNextSentencePrediction,
        FlaxBertForPreTraining,
        FlaxBertForQuestionAnswering,
        FlaxBertForSequenceClassification,
        FlaxBertForTokenClassification,
        FlaxBertModel,
    )


class FlaxBertModelTester(unittest.TestCase):
    """Builds a tiny BertConfig plus random inputs for the shared model tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, attention_mask) with random data."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Same inputs packed as the dict shape the common mixin tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        """Decoder variant: config.is_decoder=True plus encoder states/mask."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    # NOTE(review): the original obfuscated source lost this flag's name; the
    # upstream Flax BERT test sets `test_head_masking = True` — confirm.
    test_head_masking = True

    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Only check this for base model, not necessary for all model classes.
        # This will also help speed-up tests.
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
195
0
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _lowercase ( self ): """simple docstring""" _lowerCAmelCase = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" ) _lowerCAmelCase = tf.convert_to_tensor( [[5, 121, 11, 660, 16, 730, 25_543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !" _lowerCAmelCase = model(_lowercase )["""last_hidden_state"""] _lowerCAmelCase = tf.TensorShape((1, 10, 768) ) self.assertEqual(output.shape , _lowercase ) # compare the actual values for a slice. _lowerCAmelCase = tf.convert_to_tensor( [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , ) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
5
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase__ : Tuple = { """configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""], """processing_mgp_str""": ["""MgpstrProcessor"""], """tokenization_mgp_str""": ["""MgpstrTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ : Optional[int] = [ """MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""", """MgpstrModel""", """MgpstrPreTrainedModel""", """MgpstrForSceneTextRecognition""", ] if TYPE_CHECKING: from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig from .processing_mgp_str import MgpstrProcessor from .tokenization_mgp_str import MgpstrTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mgp_str import ( MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST, MgpstrForSceneTextRecognition, MgpstrModel, MgpstrPreTrainedModel, ) else: import sys lowerCamelCase__ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
12
0
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """Recursively place one queen per row, collecting full solutions.

    Args:
        possible_board: column index of the queen placed in each row so far.
        diagonal_right_collisions: occupied ``row - col`` diagonals (45 degrees).
        diagonal_left_collisions: occupied ``row + col`` diagonals (135 degrees).
        boards: output accumulator; each solution is appended as a list of
            printable row strings (e.g. ``". Q . . "``).
        n: board size.
    """
    row = len(possible_board)

    # If row is equal to the size of the board it means there are a queen in each row in
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate each column in the row to find all possible results in each row
    for col in range(n):
        # A column already used means a vertical collision; a matching
        # row - col / row + col value means a diagonal collision.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # No collision: recurse with the queen placed at (row, col).
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    """Solve the n-queens puzzle and print every solution board."""
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
711
"""Property tests: HashMap must behave exactly like the builtin dict."""
from operator import delitem, getitem, setitem

import pytest

from data_structures.hashing.hash_map import HashMap


def _get(k):
    """Build a (getitem, key) operation tuple."""
    return getitem, k


def _set(k, v):
    """Build a (setitem, key, value) operation tuple."""
    return setitem, k, v


def _del(k):
    """Build a (delitem, key) operation tuple."""
    return delitem, k


def _run_operation(obj, fun, *args):
    """Apply `fun(obj, *args)` and return (result, exception) — exactly one is None."""
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e


_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]


@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    """Replay each operation on both a HashMap and a dict; states must match."""
    my = HashMap(initial_block_size=4)
    py = {}
    for fun, *args in operations:
        my_res, _ = _run_operation(my, fun, *args)
        py_res, _ = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods():
    """HashMap must not expose public names that dict does not have."""

    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
152
0
"""Protein data type and PDB serialization helpers (OpenFold-style)."""
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple

import numpy as np

from . import residue_constants


FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.

PICO_TO_ANGSTROM = 0.01


@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation."""

    # Cartesian coordinates of atoms in angstroms; atom types follow
    # residue_constants.atom_types.
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None


def from_proteinnet_string(proteinnet_str: str) -> Protein:
    """Parse a ProteinNet-format string into a `Protein` (backbone atoms only)."""
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            # Work on a list so non-standard residues can be replaced by "X".
            seq = list(g[1][0].strip())
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = "X"
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary: List[List[float]] = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            # Columns are interleaved N/CA/C triples; de-interleave per atom type.
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom]] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )


def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    """Return REMARK/PARENT header lines for the given chain."""
    pdb_headers: List[str] = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        # Keep only the parents belonging to this chain.
        parents = [p for p, i in zip(parents, parents_chain_index) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ["N/A"]

    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers


def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    """Add pdb headers to an existing PDB string. Useful during multi-chain
    recycling."""
    out_pdb_lines: List[str] = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            # Group parents by chain index, then order chains 0..max_idx.
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: Sequence[str]) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            # A chain just ended; emit the next chain's PARENT line.
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]

            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)


def to_pdb(prot: Protein) -> str:
    """Converts a `Protein` instance to a PDB string."""
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r: int) -> str:
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types

    pdb_lines: List[str] = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)


def ideal_atom_mask(prot: Protein) -> np.ndarray:
    """Standard atom mask implied by the residue types alone."""
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]


def from_prediction(
    features: FeatureDict,
    result: ModelOutput,
    b_factors: Optional[np.ndarray] = None,
    chain_index: Optional[np.ndarray] = None,
    remark: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parents_chain_index: Optional[Sequence[int]] = None,
) -> Protein:
    """Assemble a `Protein` from model features and outputs.

    residue_index is shifted by +1 to match 1-based PDB numbering; missing
    b_factors default to zeros shaped like the atom mask.
    """
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
515
from typing import List, Optional, Tuple, Union import torch from ...utils import logging, randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline lowercase__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name class a__ ( UpperCamelCase__ ): def __init__( self , A , A ) -> Optional[int]: '''simple docstring''' super().__init__() self.register_modules(unet=A , scheduler=A ) @torch.no_grad() def __call__( self , A = 1 , A = 100 , A = None , A = None , A = True , ) -> Union[AudioPipelineOutput, Tuple]: '''simple docstring''' if audio_length_in_s is None: a = self.unet.config.sample_size / self.unet.config.sample_rate a = audio_length_in_s * self.unet.config.sample_rate a = 2 ** len(self.unet.up_blocks ) if sample_size < 3 * down_scale_factor: raise ValueError( F'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to''' F''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' ) a = int(A ) if sample_size % down_scale_factor != 0: a = ( (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 ) * down_scale_factor logger.info( F'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled''' F''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising''' " process." ) a = int(A ) a = next(iter(self.unet.parameters() ) ).dtype a = (batch_size, self.unet.config.in_channels, sample_size) if isinstance(A , A ) and len(A ) != batch_size: raise ValueError( F'''You have passed a list of generators of length {len(A )}, but requested an effective batch''' F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) a = randn_tensor(A , generator=A , device=self.device , dtype=A ) # set step values self.scheduler.set_timesteps(A , device=audio.device ) a = self.scheduler.timesteps.to(A ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. 
predict noise model_output a = self.unet(A , A ).sample # 2. compute previous image: x_t -> t_t-1 a = self.scheduler.step(A , A , A ).prev_sample a = audio.clamp(-1 , 1 ).float().cpu().numpy() a = audio[:, :, :original_sample_size] if not return_dict: return (audio,) return AudioPipelineOutput(audios=A )
515
1
"""simple docstring""" from __future__ import annotations from typing import Any class snake_case : def __init__( self :int , _lowerCamelCase :int , _lowerCamelCase :int , _lowerCamelCase :float = 0 ): __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = row, column __SCREAMING_SNAKE_CASE : Optional[Any] = [[default_value for c in range(_lowerCamelCase )] for r in range(_lowerCamelCase )] def __str__( self :Any ): __SCREAMING_SNAKE_CASE : Optional[Any] = f'''Matrix consist of {self.row} rows and {self.column} columns\n''' # Make string identifier __SCREAMING_SNAKE_CASE : Dict = 0 for row_vector in self.array: for obj in row_vector: __SCREAMING_SNAKE_CASE : int = max(_lowerCamelCase , len(str(_lowerCamelCase ) ) ) __SCREAMING_SNAKE_CASE : Any = f'''%{max_element_length}s''' # Make string and return def single_line(_lowerCamelCase :list[float] ) -> str: nonlocal string_format_identifier __SCREAMING_SNAKE_CASE : Dict = '''[''' line += ", ".join(string_format_identifier % (obj,) for obj in row_vector ) line += "]" return line s += "\n".join(single_line(_lowerCamelCase ) for row_vector in self.array ) return s def __repr__( self :str ): return str(self ) def SCREAMING_SNAKE_CASE_ ( self :str , _lowerCamelCase :tuple[int, int] ): if not (isinstance(_lowerCamelCase , (list, tuple) ) and len(_lowerCamelCase ) == 2): return False elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column): return False else: return True def __getitem__( self :List[str] , _lowerCamelCase :tuple[int, int] ): assert self.validate_indicies(_lowerCamelCase ) return self.array[loc[0]][loc[1]] def __setitem__( self :Tuple , _lowerCamelCase :tuple[int, int] , _lowerCamelCase :float ): assert self.validate_indicies(_lowerCamelCase ) __SCREAMING_SNAKE_CASE : Union[str, Any] = value def __add__( self :int , _lowerCamelCase :Matrix ): assert isinstance(_lowerCamelCase , _lowerCamelCase ) assert self.row == another.row and self.column == another.column # Add __SCREAMING_SNAKE_CASE : Dict = 
Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): __SCREAMING_SNAKE_CASE : Optional[Any] = self[r, c] + another[r, c] return result def __neg__( self :List[Any] ): __SCREAMING_SNAKE_CASE : str = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): __SCREAMING_SNAKE_CASE : Dict = -self[r, c] return result def __sub__( self :Optional[int] , _lowerCamelCase :Matrix ): return self + (-another) def __mul__( self :int , _lowerCamelCase :int | float | Matrix ): if isinstance(_lowerCamelCase , (int, float) ): # Scalar multiplication __SCREAMING_SNAKE_CASE : str = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): __SCREAMING_SNAKE_CASE : List[Any] = self[r, c] * another return result elif isinstance(_lowerCamelCase , _lowerCamelCase ): # Matrix multiplication assert self.column == another.row __SCREAMING_SNAKE_CASE : Dict = Matrix(self.row , another.column ) for r in range(self.row ): for c in range(another.column ): for i in range(self.column ): result[r, c] += self[r, i] * another[i, c] return result else: __SCREAMING_SNAKE_CASE : Optional[Any] = f'''Unsupported type given for another ({type(_lowerCamelCase )})''' raise TypeError(_lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] ): __SCREAMING_SNAKE_CASE : List[str] = Matrix(self.column , self.row ) for r in range(self.row ): for c in range(self.column ): __SCREAMING_SNAKE_CASE : Dict = self[r, c] return result def SCREAMING_SNAKE_CASE_ ( self :int , _lowerCamelCase :Matrix , _lowerCamelCase :Matrix ): assert isinstance(_lowerCamelCase , _lowerCamelCase ) and isinstance(_lowerCamelCase , _lowerCamelCase ) assert self.row == self.column == u.row == v.row # u, v should be column vector assert u.column == v.column == 1 # u, v should be column vector # Calculate __SCREAMING_SNAKE_CASE : Tuple = v.transpose() __SCREAMING_SNAKE_CASE : Optional[Any] = (v_t * self * u)[0, 0] + 1 if numerator_factor == 
0: return None # It's not invertable return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor)) # Testing if __name__ == "__main__": def lowerCAmelCase_ ( ): '''simple docstring''' __SCREAMING_SNAKE_CASE : Any = Matrix(3 , 3 , 0 ) for i in range(3 ): __SCREAMING_SNAKE_CASE : Optional[int] = 1 print(F'''a^(-1) is {ainv}''' ) # u, v __SCREAMING_SNAKE_CASE : List[str] = Matrix(3 , 1 , 0 ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = 1, 2, -3 __SCREAMING_SNAKE_CASE : int = Matrix(3 , 1 , 0 ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = 4, -2, 5 print(F'''u is {u}''' ) print(F'''v is {v}''' ) print(F'''uv^T is {u * v.transpose()}''' ) # Sherman Morrison print(F'''(a + uv^T)^(-1) is {ainv.sherman_morrison(lowercase_ , lowercase_ )}''' ) def lowerCAmelCase_ ( ): '''simple docstring''' import doctest doctest.testmod() testa()
401
"""simple docstring""" import heapq as hq import math from collections.abc import Iterator class snake_case : def __init__( self :Dict , _lowerCamelCase :List[str] ): __SCREAMING_SNAKE_CASE : Union[str, Any] = str(id_ ) __SCREAMING_SNAKE_CASE : Any = None __SCREAMING_SNAKE_CASE : Optional[int] = None __SCREAMING_SNAKE_CASE : Union[str, Any] = [] __SCREAMING_SNAKE_CASE : Optional[Any] = {} # {vertex:distance} def __lt__( self :Any , _lowerCamelCase :Any ): return self.key < other.key def __repr__( self :Any ): return self.id def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] , _lowerCamelCase :str ): self.neighbors.append(_lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self :Optional[int] , _lowerCamelCase :Any , _lowerCamelCase :Tuple ): __SCREAMING_SNAKE_CASE : int = weight def lowerCAmelCase_ ( lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : Dict ): '''simple docstring''' graph[a - 1].add_neighbor(graph[b - 1] ) graph[b - 1].add_neighbor(graph[a - 1] ) # add the edges: graph[a - 1].add_edge(graph[b - 1] , lowercase_ ) graph[b - 1].add_edge(graph[a - 1] , lowercase_ ) def lowerCAmelCase_ ( lowercase_ : list , lowercase_ : Vertex ): '''simple docstring''' __SCREAMING_SNAKE_CASE : List[str] = [] for u in graph: __SCREAMING_SNAKE_CASE : Tuple = math.inf __SCREAMING_SNAKE_CASE : Optional[int] = None __SCREAMING_SNAKE_CASE : Optional[int] = 0 __SCREAMING_SNAKE_CASE : Dict = graph[:] while q: __SCREAMING_SNAKE_CASE : Tuple = min(lowercase_ ) q.remove(lowercase_ ) for v in u.neighbors: if (v in q) and (u.edges[v.id] < v.key): __SCREAMING_SNAKE_CASE : Tuple = u __SCREAMING_SNAKE_CASE : List[str] = u.edges[v.id] for i in range(1 , len(lowercase_ ) ): a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) ) return a def lowerCAmelCase_ ( lowercase_ : list , lowercase_ : Vertex ): '''simple docstring''' for u in graph: __SCREAMING_SNAKE_CASE : Optional[Any] = math.inf __SCREAMING_SNAKE_CASE : Dict = None __SCREAMING_SNAKE_CASE : List[Any] = 
0 __SCREAMING_SNAKE_CASE : Dict = list(lowercase_ ) hq.heapify(lowercase_ ) while h: __SCREAMING_SNAKE_CASE : int = hq.heappop(lowercase_ ) for v in u.neighbors: if (v in h) and (u.edges[v.id] < v.key): __SCREAMING_SNAKE_CASE : Union[str, Any] = u __SCREAMING_SNAKE_CASE : int = u.edges[v.id] hq.heapify(lowercase_ ) for i in range(1 , len(lowercase_ ) ): yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) def lowerCAmelCase_ ( ): '''simple docstring''' if __name__ == "__main__": import doctest doctest.testmod()
401
1
def lowerCamelCase__ ( __A :int ): """simple docstring""" __snake_case = int(__A ) if n_element < 1: __snake_case = ValueError("""a should be a positive number""" ) raise my_error __snake_case = [1] __snake_case , __snake_case , __snake_case = (0, 0, 0) __snake_case = 1 while index < n_element: while hamming_list[i] * 2 <= hamming_list[-1]: i += 1 while hamming_list[j] * 3 <= hamming_list[-1]: j += 1 while hamming_list[k] * 5 <= hamming_list[-1]: k += 1 hamming_list.append( min(hamming_list[i] * 2 ,hamming_list[j] * 3 ,hamming_list[k] * 5 ) ) index += 1 return hamming_list if __name__ == "__main__": UpperCamelCase__ = input('''Enter the last number (nth term) of the Hamming Number Series: ''') print('''Formula of Hamming Number Series => 2^i * 3^j * 5^k''') UpperCamelCase__ = hamming(int(n)) print('''-----------------------------------------------------''') print(F'The list with nth numbers is: {hamming_numbers}') print('''-----------------------------------------------------''')
268
from argparse import ArgumentParser from . import BaseTransformersCLICommand def lowerCamelCase__ ( __A :Optional[int] ): """simple docstring""" return DownloadCommand(args.model ,args.cache_dir ,args.force ,args.trust_remote_code ) class __snake_case ( snake_case__ ): """simple docstring""" @staticmethod def a ( _UpperCamelCase ) -> Any: """simple docstring""" __snake_case = parser.add_parser("""download""" ) download_parser.add_argument( """--cache-dir""" , type=_UpperCamelCase , default=_UpperCamelCase , help="""Path to location to store the models""" ) download_parser.add_argument( """--force""" , action="""store_true""" , help="""Force the model to be download even if already in cache-dir""" ) download_parser.add_argument( """--trust-remote-code""" , action="""store_true""" , help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine""" , ) download_parser.add_argument("""model""" , type=_UpperCamelCase , help="""Name of the model to download""" ) download_parser.set_defaults(func=_UpperCamelCase ) def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any: """simple docstring""" __snake_case = model __snake_case = cache __snake_case = force __snake_case = trust_remote_code def a ( self ) -> List[Any]: """simple docstring""" from ..models.auto import AutoModel, AutoTokenizer AutoModel.from_pretrained( self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code ) AutoTokenizer.from_pretrained( self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
268
1
import argparse import logging import pickle from collections import Counter logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO ) lowerCAmelCase_ : Tuple = logging.getLogger(__name__) if __name__ == "__main__": lowerCAmelCase_ : List[Any] = argparse.ArgumentParser( description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)" ) parser.add_argument( "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset." ) parser.add_argument( "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file." ) parser.add_argument("--vocab_size", default=30_522, type=int) lowerCAmelCase_ : int = parser.parse_args() logger.info(F'''Loading data from {args.data_file}''') with open(args.data_file, "rb") as fp: lowerCAmelCase_ : List[Any] = pickle.load(fp) logger.info("Counting occurrences for MLM.") lowerCAmelCase_ : List[Any] = Counter() for tk_ids in data: counter.update(tk_ids) lowerCAmelCase_ : Dict = [0] * args.vocab_size for k, v in counter.items(): lowerCAmelCase_ : Tuple = v logger.info(F'''Dump to {args.token_counts_dump}''') with open(args.token_counts_dump, "wb") as handle: pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
704
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase_ : str = { "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"], "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"], "processing_wav2vec2": ["Wav2Vec2Processor"], "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ : Union[str, Any] = [ "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST", "Wav2Vec2ForAudioFrameClassification", "Wav2Vec2ForCTC", "Wav2Vec2ForMaskedLM", "Wav2Vec2ForPreTraining", "Wav2Vec2ForSequenceClassification", "Wav2Vec2ForXVector", "Wav2Vec2Model", "Wav2Vec2PreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ : int = [ "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST", "TFWav2Vec2ForCTC", "TFWav2Vec2Model", "TFWav2Vec2PreTrainedModel", "TFWav2Vec2ForSequenceClassification", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ : Dict = [ "FlaxWav2Vec2ForCTC", "FlaxWav2Vec2ForPreTraining", "FlaxWav2Vec2Model", "FlaxWav2Vec2PreTrainedModel", ] if TYPE_CHECKING: from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .processing_wavaveca import WavaVecaProcessor from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wavaveca import ( WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaForAudioFrameClassification, 
WavaVecaForCTC, WavaVecaForMaskedLM, WavaVecaForPreTraining, WavaVecaForSequenceClassification, WavaVecaForXVector, WavaVecaModel, WavaVecaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, TFWavaVecaForCTC, TFWavaVecaForSequenceClassification, TFWavaVecaModel, TFWavaVecaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( FlaxWavaVecaForCTC, FlaxWavaVecaForPreTraining, FlaxWavaVecaModel, FlaxWavaVecaPreTrainedModel, ) else: import sys lowerCAmelCase_ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
461
0
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { """facebook/data2vec-text-base""": """https://huggingface.co/data2vec/resolve/main/config.json""", } class _lowerCAmelCase ( UpperCAmelCase_ ): A__ = 'data2vec-text' def __init__( self , __UpperCAmelCase=3_0522 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-12 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase="absolute" , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase , ): super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase ) lowerCAmelCase__ : str = vocab_size lowerCAmelCase__ : Dict = hidden_size lowerCAmelCase__ : List[str] = num_hidden_layers lowerCAmelCase__ : Tuple = num_attention_heads lowerCAmelCase__ : List[Any] = hidden_act lowerCAmelCase__ : Tuple = intermediate_size lowerCAmelCase__ : Tuple = hidden_dropout_prob lowerCAmelCase__ : Any = attention_probs_dropout_prob lowerCAmelCase__ : List[Any] = max_position_embeddings lowerCAmelCase__ : Optional[int] = type_vocab_size lowerCAmelCase__ : Optional[int] = initializer_range lowerCAmelCase__ : Tuple = layer_norm_eps lowerCAmelCase__ : str = position_embedding_type lowerCAmelCase__ : List[Any] = use_cache lowerCAmelCase__ : Optional[Any] = classifier_dropout class _lowerCAmelCase ( UpperCAmelCase_ ): @property def __magic_name__( self ): if self.task == "multiple-choice": lowerCAmelCase__ : List[str] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: lowerCAmelCase__ : Dict = {0: '''batch''', 1: '''sequence'''} return 
OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
678
from __future__ import annotations from typing import Dict from ...configuration_utils import PretrainedConfig UpperCamelCase__ = { 'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json', 'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json', } class A ( UpperCAmelCase_ ): __UpperCAmelCase : Optional[Any] = 'ernie_m' __UpperCAmelCase : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"} def __init__(self : Any , __UpperCAmelCase : int = 2_5_0_0_0_2 , __UpperCAmelCase : int = 7_6_8 , __UpperCAmelCase : int = 1_2 , __UpperCAmelCase : int = 1_2 , __UpperCAmelCase : int = 3_0_7_2 , __UpperCAmelCase : str = "gelu" , __UpperCAmelCase : float = 0.1 , __UpperCAmelCase : float = 0.1 , __UpperCAmelCase : int = 5_1_4 , __UpperCAmelCase : float = 0.02 , __UpperCAmelCase : int = 1 , __UpperCAmelCase : float = 1E-05 , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : Tuple=False , __UpperCAmelCase : Any=0.0 , **__UpperCAmelCase : Tuple , ) -> Optional[int]: """simple docstring""" super().__init__(pad_token_id=__UpperCAmelCase , **__UpperCAmelCase ) UpperCAmelCase__ = vocab_size UpperCAmelCase__ = hidden_size UpperCAmelCase__ = num_hidden_layers UpperCAmelCase__ = num_attention_heads UpperCAmelCase__ = intermediate_size UpperCAmelCase__ = hidden_act UpperCAmelCase__ = hidden_dropout_prob UpperCAmelCase__ = attention_probs_dropout_prob UpperCAmelCase__ = max_position_embeddings UpperCAmelCase__ = initializer_range UpperCAmelCase__ = layer_norm_eps UpperCAmelCase__ = classifier_dropout UpperCAmelCase__ = is_decoder UpperCAmelCase__ = act_dropout
486
0
"""simple docstring""" import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _UpperCamelCase ( a__ ,unittest.TestCase ): '''simple docstring''' __UpperCAmelCase : Dict ="hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline" def snake_case ( self , __a=0 ): __lowerCAmelCase = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(lowerCamelCase_ ) ) __lowerCAmelCase = np.random.RandomState(lowerCamelCase_ ) __lowerCAmelCase = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 3, "strength": 0.7_5, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def snake_case ( self ): __lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) __lowerCAmelCase = self.get_dummy_inputs() __lowerCAmelCase = pipe(**lowerCamelCase_ ).images __lowerCAmelCase = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 1_28, 1_28, 3) __lowerCAmelCase = np.array([0.6_9_6_4_3, 0.5_8_4_8_4, 0.5_0_3_1_4, 0.5_8_7_6_0, 0.5_5_3_6_8, 0.5_9_6_4_3, 0.5_1_5_2_9, 0.4_1_2_1_7, 0.4_9_0_8_7] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def snake_case ( self ): __lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) __lowerCAmelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowerCamelCase_ ) 
pipe.set_progress_bar_config(disable=lowerCamelCase_ ) __lowerCAmelCase = self.get_dummy_inputs() __lowerCAmelCase = pipe(**lowerCamelCase_ ).images __lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) __lowerCAmelCase = np.array([0.6_1_7_3_7, 0.5_4_6_4_2, 0.5_3_1_8_3, 0.5_4_4_6_5, 0.5_2_7_4_2, 0.6_0_5_2_5, 0.4_9_9_6_9, 0.4_0_6_5_5, 0.4_8_1_5_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def snake_case ( self ): __lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) __lowerCAmelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) # warmup pass to apply optimizations __lowerCAmelCase = pipe(**self.get_dummy_inputs() ) __lowerCAmelCase = self.get_dummy_inputs() __lowerCAmelCase = pipe(**lowerCamelCase_ ).images __lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) __lowerCAmelCase = np.array([0.5_2_7_6_1, 0.5_9_9_7_7, 0.4_9_0_3_3, 0.4_9_6_1_9, 0.5_4_2_8_2, 0.5_0_3_1_1, 0.4_7_6_0_0, 0.4_0_9_1_8, 0.4_5_2_0_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def snake_case ( self ): __lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) __lowerCAmelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) __lowerCAmelCase = self.get_dummy_inputs() __lowerCAmelCase = pipe(**lowerCamelCase_ ).images __lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) __lowerCAmelCase = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def snake_case ( self ): __lowerCAmelCase = 
OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) __lowerCAmelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) __lowerCAmelCase = self.get_dummy_inputs() __lowerCAmelCase = pipe(**lowerCamelCase_ ).images __lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) __lowerCAmelCase = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def snake_case ( self ): __lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) __lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) __lowerCAmelCase = self.get_dummy_inputs() __lowerCAmelCase = pipe(**lowerCamelCase_ ).images __lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) __lowerCAmelCase = np.array([0.6_5_3_3_1, 0.5_8_2_7_7, 0.4_8_2_0_4, 0.5_6_0_5_9, 0.5_3_6_6_5, 0.5_6_2_3_5, 0.5_0_9_6_9, 0.4_0_0_0_9, 0.4_6_5_5_2] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' @property def snake_case ( self ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def snake_case ( self ): __lowerCAmelCase = ort.SessionOptions() __lowerCAmelCase = False return options def snake_case ( self ): __lowerCAmelCase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) __lowerCAmelCase = init_image.resize((7_68, 5_12) ) # using the PNDM scheduler by default 
__lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) __lowerCAmelCase = "A fantasy landscape, trending on artstation" __lowerCAmelCase = np.random.RandomState(0 ) __lowerCAmelCase = pipe( prompt=lowerCamelCase_ , image=lowerCamelCase_ , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=10 , generator=lowerCamelCase_ , output_type="np" , ) __lowerCAmelCase = output.images __lowerCAmelCase = images[0, 2_55:2_58, 3_83:3_86, -1] assert images.shape == (1, 5_12, 7_68, 3) __lowerCAmelCase = np.array([0.4_9_0_9, 0.5_0_5_9, 0.5_3_7_2, 0.4_6_2_3, 0.4_8_7_6, 0.5_0_4_9, 0.4_8_2_0, 0.4_9_5_6, 0.5_0_1_9] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def snake_case ( self ): __lowerCAmelCase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) __lowerCAmelCase = init_image.resize((7_68, 5_12) ) __lowerCAmelCase = LMSDiscreteScheduler.from_pretrained( "runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" ) __lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) __lowerCAmelCase = "A fantasy landscape, trending on artstation" __lowerCAmelCase = np.random.RandomState(0 ) __lowerCAmelCase = pipe( prompt=lowerCamelCase_ , image=lowerCamelCase_ , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=20 , 
generator=lowerCamelCase_ , output_type="np" , ) __lowerCAmelCase = output.images __lowerCAmelCase = images[0, 2_55:2_58, 3_83:3_86, -1] assert images.shape == (1, 5_12, 7_68, 3) __lowerCAmelCase = np.array([0.8_0_4_3, 0.9_2_6, 0.9_5_8_1, 0.8_1_1_9, 0.8_9_5_4, 0.9_1_3, 0.7_2_0_9, 0.7_4_6_3, 0.7_4_3_1] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
710
"""simple docstring""" import unittest from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline from transformers.convert_graph_to_onnx import ( convert, ensure_valid_input, generate_identified_filename, infer_shapes, quantize, ) from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow class _UpperCamelCase : '''simple docstring''' def snake_case ( self , __a , __a , __a ): return None class _UpperCamelCase : '''simple docstring''' def snake_case ( self , __a , __a , __a , __a ): return None class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' __UpperCAmelCase : Tuple =[ # (model_name, model_kwargs) ("""bert-base-cased""", {}), ("""gpt2""", {"""use_cache""": False}), # We don't support exporting GPT2 past keys anymore ] @require_tf @slow def snake_case ( self ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__a , "tf" , 12 , **__a ) @require_torch @slow def snake_case ( self ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__a , "pt" , 12 , **__a ) @require_torch @slow def snake_case ( self ): from transformers import BertModel __lowerCAmelCase = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"] with NamedTemporaryFile(mode="w+t" ) as vocab_file: vocab_file.write("\n".join(__a ) ) vocab_file.flush() __lowerCAmelCase = BertTokenizerFast(vocab_file.name ) with TemporaryDirectory() as bert_save_dir: __lowerCAmelCase = BertModel(BertConfig(vocab_size=len(__a ) ) ) model.save_pretrained(__a ) self._test_export(__a , "pt" , 12 , __a ) @require_tf @slow def snake_case ( self ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: __lowerCAmelCase = self._test_export(__a , "tf" , 12 , **__a ) __lowerCAmelCase = quantize(Path(__a ) ) # Ensure the actual quantized model is not bigger than the original one if 
quantized_path.stat().st_size >= Path(__a ).stat().st_size: self.fail("Quantized model is bigger than initial ONNX model" ) @require_torch @slow def snake_case ( self ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: __lowerCAmelCase = self._test_export(__a , "pt" , 12 , **__a ) __lowerCAmelCase = quantize(__a ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__a ).stat().st_size: self.fail("Quantized model is bigger than initial ONNX model" ) def snake_case ( self , __a , __a , __a , __a=None , **__a ): try: # Compute path with TemporaryDirectory() as tempdir: __lowerCAmelCase = Path(__a ).joinpath("model.onnx" ) # Remove folder if exists if path.parent.exists(): path.parent.rmdir() # Export convert(__a , __a , __a , __a , __a , **__a ) return path except Exception as e: self.fail(__a ) @require_torch @require_tokenizers @slow def snake_case ( self ): from transformers import BertModel __lowerCAmelCase = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) ) __lowerCAmelCase = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" ) self._test_infer_dynamic_axis(__a , __a , "pt" ) @require_tf @require_tokenizers @slow def snake_case ( self ): from transformers import TFBertModel __lowerCAmelCase = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) ) __lowerCAmelCase = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" ) self._test_infer_dynamic_axis(__a , __a , "tf" ) def snake_case ( self , __a , __a , __a ): __lowerCAmelCase = FeatureExtractionPipeline(__a , __a ) __lowerCAmelCase = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"] __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = infer_shapes(__a , __a ) # Assert all variables are present self.assertEqual(len(__a ) , len(__a ) ) self.assertTrue(all(var_name in shapes for var_name in variable_names ) ) 
self.assertSequenceEqual(variable_names[:3] , __a ) self.assertSequenceEqual(variable_names[3:] , __a ) # Assert inputs are {0: batch, 1: sequence} for var_name in ["input_ids", "token_type_ids", "attention_mask"]: self.assertDictEqual(shapes[var_name] , {0: "batch", 1: "sequence"} ) # Assert outputs are {0: batch, 1: sequence} and {0: batch} self.assertDictEqual(shapes["output_0"] , {0: "batch", 1: "sequence"} ) self.assertDictEqual(shapes["output_1"] , {0: "batch"} ) def snake_case ( self ): __lowerCAmelCase = ["input_ids", "attention_mask", "token_type_ids"] __lowerCAmelCase = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]} __lowerCAmelCase , __lowerCAmelCase = ensure_valid_input(FuncContiguousArgs() , __a , __a ) # Should have exactly the same number of args (all are valid) self.assertEqual(len(__a ) , 3 ) # Should have exactly the same input names self.assertEqual(set(__a ) , set(__a ) ) # Parameter should be reordered according to their respective place in the function: # (input_ids, token_type_ids, attention_mask) self.assertEqual(__a , (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]) ) # Generated args are interleaved with another args (for instance parameter "past" in GPT2) __lowerCAmelCase , __lowerCAmelCase = ensure_valid_input(FuncNonContiguousArgs() , __a , __a ) # Should have exactly the one arg (all before the one not provided "some_other_args") self.assertEqual(len(__a ) , 1 ) self.assertEqual(len(__a ) , 1 ) # Should have only "input_ids" self.assertEqual(inputs_args[0] , tokens["input_ids"] ) self.assertEqual(ordered_input_names[0] , "input_ids" ) def snake_case ( self ): __lowerCAmelCase = generate_identified_filename(Path("/home/something/my_fake_model.onnx" ) , "-test" ) self.assertEqual("/home/something/my_fake_model-test.onnx" , generated.as_posix() )
282
0
'''simple docstring''' from ...processing_utils import ProcessorMixin class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE = """SpeechT5FeatureExtractor""" __SCREAMING_SNAKE_CASE = """SpeechT5Tokenizer""" def __init__( self : List[Any] , a_ : str , a_ : str ): """simple docstring""" super().__init__(a_ , a_ ) def __call__( self : Dict , *a_ : Tuple , **a_ : List[str] ): """simple docstring""" __snake_case = kwargs.pop("audio" , a_ ) __snake_case = kwargs.pop("text" , a_ ) __snake_case = kwargs.pop("text_target" , a_ ) __snake_case = kwargs.pop("audio_target" , a_ ) __snake_case = kwargs.pop("sampling_rate" , a_ ) if audio is not None and text is not None: raise ValueError( "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?" ) if audio_target is not None and text_target is not None: raise ValueError( "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?" ) if audio is None and audio_target is None and text is None and text_target is None: raise ValueError( "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process." 
) if audio is not None: __snake_case = self.feature_extractor(a_ , *a_ , sampling_rate=a_ , **a_ ) elif text is not None: __snake_case = self.tokenizer(a_ , **a_ ) else: __snake_case = None if audio_target is not None: __snake_case = self.feature_extractor(audio_target=a_ , *a_ , sampling_rate=a_ , **a_ ) __snake_case = targets["input_values"] elif text_target is not None: __snake_case = self.tokenizer(a_ , **a_ ) __snake_case = targets["input_ids"] else: __snake_case = None if inputs is None: return targets if targets is not None: __snake_case = labels __snake_case = targets.get("attention_mask" ) if decoder_attention_mask is not None: __snake_case = decoder_attention_mask return inputs def A ( self : List[str] , *a_ : str , **a_ : Dict ): """simple docstring""" __snake_case = kwargs.pop("input_values" , a_ ) __snake_case = kwargs.pop("input_ids" , a_ ) __snake_case = kwargs.pop("labels" , a_ ) if input_values is not None and input_ids is not None: raise ValueError("Cannot process both `input_values` and `input_ids` inputs." ) if input_values is None and input_ids is None and labels is None: raise ValueError( "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded." 
) if input_values is not None: __snake_case = self.feature_extractor.pad(a_ , *a_ , **a_ ) elif input_ids is not None: __snake_case = self.tokenizer.pad(a_ , **a_ ) else: __snake_case = None if labels is not None: if "input_ids" in labels or (isinstance(a_ , a_ ) and "input_ids" in labels[0]): __snake_case = self.tokenizer.pad(a_ , **a_ ) __snake_case = targets["input_ids"] else: __snake_case = self.feature_extractor.feature_size __snake_case = self.feature_extractor.num_mel_bins __snake_case = self.feature_extractor.pad(a_ , *a_ , **a_ ) __snake_case = feature_size_hack __snake_case = targets["input_values"] else: __snake_case = None if inputs is None: return targets if targets is not None: __snake_case = labels __snake_case = targets.get("attention_mask" ) if decoder_attention_mask is not None: __snake_case = decoder_attention_mask return inputs def A ( self : List[str] , *a_ : Any , **a_ : List[str] ): """simple docstring""" return self.tokenizer.batch_decode(*a_ , **a_ ) def A ( self : Optional[int] , *a_ : Union[str, Any] , **a_ : str ): """simple docstring""" return self.tokenizer.decode(*a_ , **a_ )
69
"""simple docstring""" from queue import Queue from typing import TYPE_CHECKING, Optional if TYPE_CHECKING: from ..models.auto import AutoTokenizer class __A : '''simple docstring''' def UpperCAmelCase ( self : Any ,_snake_case : Tuple ) -> List[Any]: """simple docstring""" raise NotImplementedError() def UpperCAmelCase ( self : List[Any] ) -> List[Any]: """simple docstring""" raise NotImplementedError() class __A ( A_ ): '''simple docstring''' def __init__( self : Tuple ,_snake_case : "AutoTokenizer" ,_snake_case : bool = False ,**_snake_case : List[str] ) -> Tuple: """simple docstring""" lowercase__ : Dict = tokenizer lowercase__ : Any = skip_prompt lowercase__ : int = decode_kwargs # variables used in the streaming process lowercase__ : Optional[Any] = [] lowercase__ : int = 0 lowercase__ : List[Any] = True def UpperCAmelCase ( self : List[Any] ,_snake_case : str ) -> Any: """simple docstring""" if len(value.shape ) > 1 and value.shape[0] > 1: raise ValueError('''TextStreamer only supports batch size 1''' ) elif len(value.shape ) > 1: lowercase__ : Optional[int] = value[0] if self.skip_prompt and self.next_tokens_are_prompt: lowercase__ : Optional[Any] = False return # Add the new token to the cache and decodes the entire thing. self.token_cache.extend(value.tolist() ) lowercase__ : Union[str, Any] = self.tokenizer.decode(self.token_cache ,**self.decode_kwargs ) # After the symbol for a new line, we flush the cache. if text.endswith('''\n''' ): lowercase__ : List[Any] = text[self.print_len :] lowercase__ : Dict = [] lowercase__ : int = 0 # If the last token is a CJK character, we print the characters. elif len(_snake_case ) > 0 and self._is_chinese_char(ord(text[-1] ) ): lowercase__ : List[str] = text[self.print_len :] self.print_len += len(_snake_case ) # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words, # which may change with the subsequent token -- there are probably smarter ways to do this!) 
else: lowercase__ : Tuple = text[self.print_len : text.rfind(''' ''' ) + 1] self.print_len += len(_snake_case ) self.on_finalized_text(_snake_case ) def UpperCAmelCase ( self : str ) -> int: """simple docstring""" if len(self.token_cache ) > 0: lowercase__ : Union[str, Any] = self.tokenizer.decode(self.token_cache ,**self.decode_kwargs ) lowercase__ : Dict = text[self.print_len :] lowercase__ : Union[str, Any] = [] lowercase__ : Optional[int] = 0 else: lowercase__ : Union[str, Any] = '''''' lowercase__ : str = True self.on_finalized_text(_snake_case ,stream_end=_snake_case ) def UpperCAmelCase ( self : List[Any] ,_snake_case : str ,_snake_case : bool = False ) -> List[Any]: """simple docstring""" print(_snake_case ,flush=_snake_case ,end='''''' if not stream_end else None ) def UpperCAmelCase ( self : Dict ,_snake_case : Optional[Any] ) -> Any: """simple docstring""" if ( (cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F) or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) # or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) # or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) # or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) # or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) # or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F) or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) # ): # return True return False class __A ( A_ ): '''simple docstring''' def __init__( self : str ,_snake_case : "AutoTokenizer" ,_snake_case : bool = False ,_snake_case : Optional[float] = None ,**_snake_case : Dict ) -> Optional[Any]: """simple docstring""" super().__init__(_snake_case ,_snake_case ,**_snake_case ) lowercase__ : Union[str, Any] = Queue() lowercase__ : Any = None lowercase__ : str = timeout def UpperCAmelCase ( self : Dict ,_snake_case : str ,_snake_case : bool = False ) -> Tuple: """simple docstring""" self.text_queue.put(_snake_case ,timeout=self.timeout ) if stream_end: self.text_queue.put(self.stop_signal ,timeout=self.timeout ) def __iter__( self : Tuple ) -> int: """simple docstring""" return self def UpperCAmelCase ( self : 
Optional[Any] ) -> Tuple: """simple docstring""" lowercase__ : Tuple = self.text_queue.get(timeout=self.timeout ) if value == self.stop_signal: raise StopIteration() else: return value
560
0
'''simple docstring''' import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html a_ : List[Any] = "platform" import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class a : _lowerCAmelCase = PegasusConfig _lowerCAmelCase = {} _lowerCAmelCase = """gelu""" def __init__( self , __magic_name__ , __magic_name__=13 , __magic_name__=7 , __magic_name__=True , __magic_name__=False , __magic_name__=99 , __magic_name__=32 , __magic_name__=5 , __magic_name__=4 , __magic_name__=37 , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=20 , __magic_name__=2 , __magic_name__=1 , __magic_name__=0 , ) -> Optional[int]: _a = parent _a = batch_size _a = seq_length _a = is_training _a = use_labels _a = vocab_size _a = hidden_size _a = num_hidden_layers _a = num_attention_heads _a = intermediate_size _a = hidden_dropout_prob _a = attention_probs_dropout_prob _a = max_position_embeddings _a = eos_token_id _a = pad_token_id _a = bos_token_id def __UpperCAmelCase ( self ) -> int: _a = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) _a = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) _a = np.concatenate([input_ids, eos_tensor] , axis=1 ) _a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _a = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , 
encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _a = prepare_pegasus_inputs_dict(__magic_name__ , __magic_name__ , __magic_name__ ) return config, inputs_dict def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ ) -> Union[str, Any]: _a = 20 _a = model_class_name(__magic_name__ ) _a = model.encode(inputs_dict['input_ids'] ) _a , _a = ( inputs_dict['decoder_input_ids'], inputs_dict['decoder_attention_mask'], ) _a = model.init_cache(decoder_input_ids.shape[0] , __magic_name__ , __magic_name__ ) _a = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' ) _a = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _a = model.decode( decoder_input_ids[:, :-1] , __magic_name__ , decoder_attention_mask=__magic_name__ , past_key_values=__magic_name__ , decoder_position_ids=__magic_name__ , ) _a = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' ) _a = model.decode( decoder_input_ids[:, -1:] , __magic_name__ , decoder_attention_mask=__magic_name__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__magic_name__ , ) _a = model.decode(__magic_name__ , __magic_name__ ) _a = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'Max diff is {diff}' ) def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , 
__magic_name__ ) -> Union[str, Any]: _a = 20 _a = model_class_name(__magic_name__ ) _a = model.encode(inputs_dict['input_ids'] ) _a , _a = ( inputs_dict['decoder_input_ids'], inputs_dict['decoder_attention_mask'], ) _a = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) _a = model.init_cache(decoder_input_ids.shape[0] , __magic_name__ , __magic_name__ ) _a = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _a = model.decode( decoder_input_ids[:, :-1] , __magic_name__ , decoder_attention_mask=__magic_name__ , past_key_values=__magic_name__ , decoder_position_ids=__magic_name__ , ) _a = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' ) _a = model.decode( decoder_input_ids[:, -1:] , __magic_name__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__magic_name__ , decoder_position_ids=__magic_name__ , ) _a = model.decode(__magic_name__ , __magic_name__ , decoder_attention_mask=__magic_name__ ) _a = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'Max diff is {diff}' ) def _A (lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str]=None , lowerCAmelCase__ :List[Any]=None , ) -> str: '''simple docstring''' if attention_mask is None: _a = np.not_equal(lowerCAmelCase__ , config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: _a = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } 
@require_flax class a ( _SCREAMING_SNAKE_CASE , unittest.TestCase ): _lowerCAmelCase = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) _lowerCAmelCase = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () _lowerCAmelCase = True _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False def __UpperCAmelCase ( self ) -> Tuple: _a = FlaxPegasusModelTester(self ) _a = ConfigTester(self , config_class=__magic_name__ ) def __UpperCAmelCase ( self ) -> Tuple: self.config_tester.run_common_tests() def __UpperCAmelCase ( self ) -> Dict: _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(__magic_name__ , __magic_name__ , __magic_name__ ) def __UpperCAmelCase ( self ) -> List[str]: _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(__magic_name__ , __magic_name__ , __magic_name__ ) def __UpperCAmelCase ( self ) -> Optional[int]: _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _a = self._prepare_for_class(__magic_name__ , __magic_name__ ) _a = model_class(__magic_name__ ) @jax.jit def encode_jitted(__magic_name__ , __magic_name__=None , **__magic_name__ ): return model.encode(input_ids=__magic_name__ , attention_mask=__magic_name__ ) with self.subTest('JIT Enabled' ): _a = encode_jitted(**__magic_name__ ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): _a = encode_jitted(**__magic_name__ ).to_tuple() self.assertEqual(len(__magic_name__ ) , len(__magic_name__ ) ) for jitted_output, output in zip(__magic_name__ , __magic_name__ ): self.assertEqual(jitted_output.shape , output.shape ) def __UpperCAmelCase ( self ) -> List[Any]: _a , _a = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _a = model_class(__magic_name__ ) _a = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] ) _a = { 'decoder_input_ids': inputs_dict['decoder_input_ids'], 'decoder_attention_mask': inputs_dict['decoder_attention_mask'], 'encoder_outputs': encoder_outputs, } @jax.jit def decode_jitted(__magic_name__ , __magic_name__ , __magic_name__ ): return model.decode( decoder_input_ids=__magic_name__ , decoder_attention_mask=__magic_name__ , encoder_outputs=__magic_name__ , ) with self.subTest('JIT Enabled' ): _a = decode_jitted(**__magic_name__ ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): _a = decode_jitted(**__magic_name__ ).to_tuple() self.assertEqual(len(__magic_name__ ) , len(__magic_name__ ) ) for jitted_output, output in zip(__magic_name__ , __magic_name__ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def __UpperCAmelCase ( self ) -> int: for model_class_name in self.all_model_classes: _a = model_class_name.from_pretrained('google/pegasus-large' , from_pt=__magic_name__ ) _a = np.ones((1, 1) ) _a = model(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) @slow def __UpperCAmelCase ( self ) -> Optional[int]: _a = FlaxPegasusForConditionalGeneration.from_pretrained('google/pegasus-xsum' ) _a = PegasusTokenizer.from_pretrained('google/pegasus-xsum' ) _a = [ ' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. 
Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.', ' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" 
', ] _a = [ 'California\'s largest electricity provider has turned off power to hundreds of thousands of customers.', 'Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.', ] _a = tokenizer(__magic_name__ , return_tensors='np' , truncation=__magic_name__ , max_length=5_12 , padding=__magic_name__ ) _a = model.generate(**__magic_name__ , num_beams=2 ).sequences _a = tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ ) assert tgt_text == decoded
703
'''simple docstring''' import argparse import collections import os import re import tempfile import pandas as pd from datasets import Dataset from huggingface_hub import hf_hub_download, upload_folder from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/update_metadata.py a_ : int = "src/transformers" # This is to make sure the transformers module imported is the one in the repo. a_ : Optional[int] = direct_transformers_import(TRANSFORMERS_PATH) # Regexes that match TF/Flax/PT model names. a_ : str = re.compile(R"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") a_ : Union[str, Any] = re.compile(R"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. a_ : List[Any] = re.compile(R"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # Fill this with tuples (pipeline_tag, model_mapping, auto_model) a_ : int = [ ("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"), ("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"), ("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"), ("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"), ("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"), ("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"), ("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"), ("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"), ("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"), ( "zero-shot-object-detection", "MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForZeroShotObjectDetection", ), 
("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"), ("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"), ("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"), ("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"), ( "table-question-answering", "MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForTableQuestionAnswering", ), ("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"), ("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"), ( "next-sentence-prediction", "MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES", "AutoModelForNextSentencePrediction", ), ( "audio-frame-classification", "MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioFrameClassification", ), ("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"), ( "document-question-answering", "MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForDocumentQuestionAnswering", ), ( "visual-question-answering", "MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForVisualQuestionAnswering", ), ("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"), ( "zero-shot-image-classification", "MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForZeroShotImageClassification", ), ("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"), ("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"), ("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"), ] def _A (lowerCAmelCase__ :List[Any] ) -> str: '''simple docstring''' _a = 
re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)' , lowerCAmelCase__ ) return [m.group(0 ) for m in matches] def _A () -> Union[str, Any]: '''simple docstring''' _a = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES _a = { config.replace('Config' , '' ): model_type for model_type, config in config_maping_names.items() } # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax. _a = collections.defaultdict(lowerCAmelCase__ ) _a = collections.defaultdict(lowerCAmelCase__ ) _a = collections.defaultdict(lowerCAmelCase__ ) # Let's lookup through all transformers object (once) and find if models are supported by a given backend. for attr_name in dir(lowerCAmelCase__ ): _a = None if _re_tf_models.match(lowerCAmelCase__ ) is not None: _a = tf_models _a = _re_tf_models.match(lowerCAmelCase__ ).groups()[0] elif _re_flax_models.match(lowerCAmelCase__ ) is not None: _a = flax_models _a = _re_flax_models.match(lowerCAmelCase__ ).groups()[0] elif _re_pt_models.match(lowerCAmelCase__ ) is not None: _a = pt_models _a = _re_pt_models.match(lowerCAmelCase__ ).groups()[0] if lookup_dict is not None: while len(lowerCAmelCase__ ) > 0: if attr_name in model_prefix_to_model_type: _a = True break # Try again after removing the last word in the name _a = ''.join(camel_case_split(lowerCAmelCase__ )[:-1] ) _a = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) ) _a = list(lowerCAmelCase__ ) all_models.sort() _a = {'model_type': all_models} _a = [pt_models[t] for t in all_models] _a = [tf_models[t] for t in all_models] _a = [flax_models[t] for t in all_models] # Now let's use the auto-mapping names to make sure _a = {} for t in all_models: if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES: _a = 'AutoProcessor' elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES: _a = 'AutoTokenizer' elif t in 
transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES: _a = 'AutoFeatureExtractor' else: # Default to AutoTokenizer if a model has nothing, for backward compatibility. _a = 'AutoTokenizer' _a = [processors[t] for t in all_models] return pd.DataFrame(lowerCAmelCase__ ) def _A (lowerCAmelCase__ :Dict ) -> str: '''simple docstring''' _a = [ transformers_module.models.auto.modeling_auto, transformers_module.models.auto.modeling_tf_auto, transformers_module.models.auto.modeling_flax_auto, ] for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS: _a = [model_mapping, f'TF_{model_mapping}', f'FLAX_{model_mapping}'] _a = [auto_class, f'TF_{auto_class}', f'Flax_{auto_class}'] # Loop through all three frameworks for module, cls, mapping in zip(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): # The type of pipeline may not exist in this framework if not hasattr(lowerCAmelCase__ , lowerCAmelCase__ ): continue # First extract all model_names _a = [] for name in getattr(lowerCAmelCase__ , lowerCAmelCase__ ).values(): if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): model_names.append(lowerCAmelCase__ ) else: model_names.extend(list(lowerCAmelCase__ ) ) # Add pipeline tag and auto model class for those models table.update({model_name: (pipeline_tag, cls) for model_name in model_names} ) return table def _A (lowerCAmelCase__ :Any , lowerCAmelCase__ :str ) -> Union[str, Any]: '''simple docstring''' _a = get_frameworks_table() _a = Dataset.from_pandas(lowerCAmelCase__ ) _a = hf_hub_download( 'huggingface/transformers-metadata' , 'pipeline_tags.json' , repo_type='dataset' , token=lowerCAmelCase__ ) _a = Dataset.from_json(lowerCAmelCase__ ) _a = { tags_dataset[i]['model_class']: (tags_dataset[i]['pipeline_tag'], tags_dataset[i]['auto_class']) for i in range(len(lowerCAmelCase__ ) ) } _a = update_pipeline_and_auto_class_table(lowerCAmelCase__ ) # Sort the model classes to avoid some nondeterministic updates to create 
false update commits. _a = sorted(table.keys() ) _a = pd.DataFrame( { 'model_class': model_classes, 'pipeline_tag': [table[m][0] for m in model_classes], 'auto_class': [table[m][1] for m in model_classes], } ) _a = Dataset.from_pandas(lowerCAmelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: frameworks_dataset.to_json(os.path.join(lowerCAmelCase__ , 'frameworks.json' ) ) tags_dataset.to_json(os.path.join(lowerCAmelCase__ , 'pipeline_tags.json' ) ) if commit_sha is not None: _a = ( f'Update with commit {commit_sha}\n\nSee: ' f'https://github.com/huggingface/transformers/commit/{commit_sha}' ) else: _a = 'Update' upload_folder( repo_id='huggingface/transformers-metadata' , folder_path=lowerCAmelCase__ , repo_type='dataset' , token=lowerCAmelCase__ , commit_message=lowerCAmelCase__ , ) def _A () -> str: '''simple docstring''' _a = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS} _a = transformers_module.pipelines.SUPPORTED_TASKS _a = [] for key in pipeline_tasks: if key not in in_table: _a = pipeline_tasks[key]['pt'] if isinstance(lowerCAmelCase__ , (list, tuple) ): _a = model[0] _a = model.__name__ if model not in in_table.values(): missing.append(lowerCAmelCase__ ) if len(lowerCAmelCase__ ) > 0: _a = ', '.join(lowerCAmelCase__ ) raise ValueError( 'The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside ' f'`utils/update_metadata.py`: {msg}. Please add them!' ) if __name__ == "__main__": a_ : Dict = argparse.ArgumentParser() parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.") parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.") parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.") a_ : Union[str, Any] = parser.parse_args() if args.check_only: check_pipeline_tags() else: update_metadata(args.token, args.commit_sha)
532
0
from .glue import GlueDataset, GlueDataTrainingArguments from .language_modeling import ( LineByLineTextDataset, LineByLineWithRefDataset, LineByLineWithSOPTextDataset, TextDataset, TextDatasetForNextSentencePrediction, ) from .squad import SquadDataset, SquadDataTrainingArguments
278
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from timm import create_model from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import BitConfig, BitForImageClassification, BitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() snake_case__ : Dict = logging.get_logger(__name__) def __lowerCamelCase ( A__ : Optional[Any] ) -> List[str]: lowerCamelCase_ : int = """huggingface/label-files""" lowerCamelCase_ : Dict = """imagenet-1k-id2label.json""" lowerCamelCase_ : Optional[Any] = json.load(open(hf_hub_download(A__ , A__ , repo_type="""dataset""" ) , """r""" ) ) lowerCamelCase_ : str = {int(A__ ): v for k, v in idalabel.items()} lowerCamelCase_ : List[Any] = {v: k for k, v in idalabel.items()} lowerCamelCase_ : str = """std_conv""" if """bit""" in model_name else False # note that when using BiT as backbone for ViT-hybrid checkpoints, # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same", # config.conv_layer = "std_conv_same" lowerCamelCase_ : Optional[Any] = BitConfig( conv_layer=A__ , num_labels=1000 , idalabel=A__ , labelaid=A__ , ) return config def __lowerCamelCase ( A__ : str ) -> Any: if "stem.conv" in name: lowerCamelCase_ : Any = name.replace("""stem.conv""" , """bit.embedder.convolution""" ) if "blocks" in name: lowerCamelCase_ : Dict = name.replace("""blocks""" , """layers""" ) if "head.fc" in name: lowerCamelCase_ : Optional[Any] = name.replace("""head.fc""" , """classifier.1""" ) if name.startswith("""norm""" ): lowerCamelCase_ : int = """bit.""" + name if "bit" not in name and "classifier" not in name: lowerCamelCase_ : str = """bit.encoder.""" + name return name def __lowerCamelCase ( ) -> List[Any]: lowerCamelCase_ : List[Any] = 
"""http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCamelCase_ : Optional[Any] = Image.open(requests.get(A__ , stream=A__ ).raw ) return im @torch.no_grad() def __lowerCamelCase ( A__ : List[Any] , A__ : List[str] , A__ : Tuple=False ) -> List[str]: lowerCamelCase_ : Optional[Any] = get_config(A__ ) # load original model from timm lowerCamelCase_ : Optional[Any] = create_model(A__ , pretrained=A__ ) timm_model.eval() # load state_dict of original model lowerCamelCase_ : Optional[int] = timm_model.state_dict() for key in state_dict.copy().keys(): lowerCamelCase_ : int = state_dict.pop(A__ ) lowerCamelCase_ : Union[str, Any] = val.squeeze() if """head""" in key else val # load HuggingFace model lowerCamelCase_ : Tuple = BitForImageClassification(A__ ) model.eval() model.load_state_dict(A__ ) # create image processor lowerCamelCase_ : Optional[Any] = create_transform(**resolve_data_config({} , model=A__ ) ) lowerCamelCase_ : List[Any] = transform.transforms lowerCamelCase_ : List[Any] = { """bilinear""": PILImageResampling.BILINEAR, """bicubic""": PILImageResampling.BICUBIC, """nearest""": PILImageResampling.NEAREST, } lowerCamelCase_ : List[str] = BitImageProcessor( do_resize=A__ , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=A__ , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=A__ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) lowerCamelCase_ : int = prepare_img() lowerCamelCase_ : int = transform(A__ ).unsqueeze(0 ) lowerCamelCase_ : List[str] = processor(A__ , return_tensors="""pt""" ).pixel_values # verify pixel values assert torch.allclose(A__ , A__ ) # verify logits with torch.no_grad(): lowerCamelCase_ : str = model(A__ ) lowerCamelCase_ : int = outputs.logits print("""Logits:""" , logits[0, :3] ) print("""Predicted class:""" , 
model.config.idalabel[logits.argmax(-1 ).item()] ) lowerCamelCase_ : List[Any] = timm_model(A__ ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(A__ , outputs.logits , atol=1e-3 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: Path(A__ ).mkdir(exist_ok=A__ ) print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' ) model.save_pretrained(A__ ) processor.save_pretrained(A__ ) if push_to_hub: print(f'''Pushing model {model_name} and processor to the hub''' ) model.push_to_hub(f'''ybelkada/{model_name}''' ) processor.push_to_hub(f'''ybelkada/{model_name}''' ) if __name__ == "__main__": snake_case__ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='resnetv2_50x1_bitm', type=str, help='Name of the BiT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model to the hub.', ) snake_case__ : int = parser.parse_args() convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
278
1
"""simple docstring""" def a_ ( _lowerCAmelCase : list ): '''simple docstring''' if len(_lowerCAmelCase ) <= 1: return [tuple(_lowerCAmelCase )] lowercase__ : Optional[Any] = [] def generate(_lowerCAmelCase : int , _lowerCAmelCase : list ): lowercase__ : Optional[int] = [0] * n res.append(tuple(_lowerCAmelCase ) ) lowercase__ : List[Any] = 0 while i < n: if c[i] < i: if i % 2 == 0: lowercase__ , lowercase__ : int = arr[i], arr[0] else: lowercase__ , lowercase__ : str = arr[i], arr[c[i]] res.append(tuple(_lowerCAmelCase ) ) c[i] += 1 lowercase__ : int = 0 else: lowercase__ : List[Any] = 0 i += 1 generate(len(_lowerCAmelCase ) , _lowerCAmelCase ) return res if __name__ == "__main__": _UpperCamelCase : Dict = input("Enter numbers separated by a comma:\n").strip() _UpperCamelCase : Any = [int(item) for item in user_input.split(",")] print(heaps(arr))
645
"""simple docstring""" def a_ ( _lowerCAmelCase : str ): '''simple docstring''' lowercase__ : Any = [0] * len(_lowerCAmelCase ) for i in range(1 , len(_lowerCAmelCase ) ): # use last results for better performance - dynamic programming lowercase__ : List[str] = prefix_result[i - 1] while j > 0 and input_string[i] != input_string[j]: lowercase__ : Dict = prefix_result[j - 1] if input_string[i] == input_string[j]: j += 1 lowercase__ : Union[str, Any] = j return prefix_result def a_ ( _lowerCAmelCase : str ): '''simple docstring''' return max(prefix_function(_lowerCAmelCase ) ) if __name__ == "__main__": import doctest doctest.testmod()
645
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available lowerCamelCase__ : Optional[int] = {"""configuration_speech_encoder_decoder""": ["""SpeechEncoderDecoderConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ : str = ["""SpeechEncoderDecoderModel"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ : Any = ["""FlaxSpeechEncoderDecoderModel"""] if TYPE_CHECKING: from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel else: import sys lowerCamelCase__ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
12
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roformer import RoFormerTokenizer from .tokenization_utils import JiebaPreTokenizer snake_case = logging.get_logger(__name__) snake_case = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} snake_case = { '''vocab_file''': { '''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''', '''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''', '''junnyu/roformer_chinese_char_small''': ( '''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt''' ), '''junnyu/roformer_chinese_char_base''': ( '''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt''' ), '''junnyu/roformer_small_discriminator''': ( '''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt''' ), '''junnyu/roformer_small_generator''': ( '''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt''' ), } } snake_case = { '''junnyu/roformer_chinese_small''': 1_5_3_6, '''junnyu/roformer_chinese_base''': 1_5_3_6, '''junnyu/roformer_chinese_char_small''': 5_1_2, '''junnyu/roformer_chinese_char_base''': 5_1_2, '''junnyu/roformer_small_discriminator''': 1_2_8, '''junnyu/roformer_small_generator''': 1_2_8, } snake_case = { '''junnyu/roformer_chinese_small''': {'''do_lower_case''': True}, '''junnyu/roformer_chinese_base''': {'''do_lower_case''': True}, '''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True}, '''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True}, '''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True}, 
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True}, } class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): A__ : str = VOCAB_FILES_NAMES A__ : Tuple = PRETRAINED_VOCAB_FILES_MAP A__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ : Dict = PRETRAINED_INIT_CONFIGURATION A__ : Any = RoFormerTokenizer def __init__( self : Optional[int] , __lowerCamelCase : Any=None , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : int=True , __lowerCamelCase : Any="[UNK]" , __lowerCamelCase : Tuple="[SEP]" , __lowerCamelCase : Optional[Any]="[PAD]" , __lowerCamelCase : Union[str, Any]="[CLS]" , __lowerCamelCase : int="[MASK]" , __lowerCamelCase : str=True , __lowerCamelCase : List[Any]=None , **__lowerCamelCase : Any , ): """simple docstring""" super().__init__( __lowerCamelCase , tokenizer_file=__lowerCamelCase , do_lower_case=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , tokenize_chinese_chars=__lowerCamelCase , strip_accents=__lowerCamelCase , **__lowerCamelCase , ) _snake_case = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( pre_tok_state.get('''lowercase''' , __lowerCamelCase ) != do_lower_case or pre_tok_state.get('''strip_accents''' , __lowerCamelCase ) != strip_accents ): _snake_case = getattr(__lowerCamelCase , pre_tok_state.pop('''type''' ) ) _snake_case = do_lower_case _snake_case = strip_accents _snake_case = pre_tok_class(**__lowerCamelCase ) _snake_case = do_lower_case def __getstate__( self : int ): """simple docstring""" _snake_case = self.__dict__.copy() _snake_case = BertPreTokenizer() return state def __setstate__( self : Dict , __lowerCamelCase : Optional[Any] ): """simple docstring""" _snake_case = d _snake_case = self.__dict__['''_tokenizer'''].get_vocab() _snake_case = PreTokenizer.custom(JiebaPreTokenizer(__lowerCamelCase ) ) def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : Any 
, __lowerCamelCase : List[Any]=None ): """simple docstring""" _snake_case = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __UpperCAmelCase ( self : Dict , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): """simple docstring""" _snake_case = [self.sep_token_id] _snake_case = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ): """simple docstring""" _snake_case = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase ) return tuple(__lowerCamelCase ) def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : List[str]=None , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : List[str]=False , **__lowerCamelCase : List[Any] , ): """simple docstring""" _snake_case = BertPreTokenizer() return super().save_pretrained(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase )
103
0
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification def lowerCamelCase ( UpperCamelCase : Optional[Any] ) -> Tuple: _lowerCamelCase = SwinvaConfig() _lowerCamelCase = swinva_name.split('_' ) _lowerCamelCase = name_split[1] if "to" in name_split[3]: _lowerCamelCase = int(name_split[3][-3:] ) else: _lowerCamelCase = int(name_split[3] ) if "to" in name_split[2]: _lowerCamelCase = int(name_split[2][-2:] ) else: _lowerCamelCase = int(name_split[2][6:] ) if model_size == "tiny": _lowerCamelCase = 96 _lowerCamelCase = (2, 2, 6, 2) _lowerCamelCase = (3, 6, 12, 24) elif model_size == "small": _lowerCamelCase = 96 _lowerCamelCase = (2, 2, 18, 2) _lowerCamelCase = (3, 6, 12, 24) elif model_size == "base": _lowerCamelCase = 1_28 _lowerCamelCase = (2, 2, 18, 2) _lowerCamelCase = (4, 8, 16, 32) else: _lowerCamelCase = 1_92 _lowerCamelCase = (2, 2, 18, 2) _lowerCamelCase = (6, 12, 24, 48) if "to" in swinva_name: _lowerCamelCase = (12, 12, 12, 6) if ("22k" in swinva_name) and ("to" not in swinva_name): _lowerCamelCase = 2_18_41 _lowerCamelCase = 'huggingface/label-files' _lowerCamelCase = 'imagenet-22k-id2label.json' _lowerCamelCase = json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type='dataset' ) , 'r' ) ) _lowerCamelCase = {int(UpperCamelCase ): v for k, v in idalabel.items()} _lowerCamelCase = idalabel _lowerCamelCase = {v: k for k, v in idalabel.items()} else: _lowerCamelCase = 10_00 _lowerCamelCase = 'huggingface/label-files' _lowerCamelCase = 'imagenet-1k-id2label.json' _lowerCamelCase = json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type='dataset' ) , 'r' ) ) _lowerCamelCase = {int(UpperCamelCase ): v for k, v in idalabel.items()} _lowerCamelCase = idalabel _lowerCamelCase = {v: k for k, v in idalabel.items()} _lowerCamelCase = img_size 
_lowerCamelCase = num_classes _lowerCamelCase = embed_dim _lowerCamelCase = depths _lowerCamelCase = num_heads _lowerCamelCase = window_size return config def lowerCamelCase ( UpperCamelCase : Dict ) -> Dict: if "patch_embed.proj" in name: _lowerCamelCase = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: _lowerCamelCase = name.replace('patch_embed.norm' , 'embeddings.norm' ) if "layers" in name: _lowerCamelCase = 'encoder.' + name if "attn.proj" in name: _lowerCamelCase = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name: _lowerCamelCase = name.replace('attn' , 'attention.self' ) if "norm1" in name: _lowerCamelCase = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: _lowerCamelCase = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: _lowerCamelCase = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: _lowerCamelCase = name.replace('mlp.fc2' , 'output.dense' ) if "q_bias" in name: _lowerCamelCase = name.replace('q_bias' , 'query.bias' ) if "k_bias" in name: _lowerCamelCase = name.replace('k_bias' , 'key.bias' ) if "v_bias" in name: _lowerCamelCase = name.replace('v_bias' , 'value.bias' ) if "cpb_mlp" in name: _lowerCamelCase = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' ) if name == "norm.weight": _lowerCamelCase = 'layernorm.weight' if name == "norm.bias": _lowerCamelCase = 'layernorm.bias' if "head" in name: _lowerCamelCase = name.replace('head' , 'classifier' ) else: _lowerCamelCase = 'swinv2.' + name return name def lowerCamelCase ( UpperCamelCase : Dict , UpperCamelCase : List[Any] ) -> Any: for key in orig_state_dict.copy().keys(): _lowerCamelCase = orig_state_dict.pop(UpperCamelCase ) if "mask" in key: continue elif "qkv" in key: _lowerCamelCase = key.split('.' 
) _lowerCamelCase = int(key_split[1] ) _lowerCamelCase = int(key_split[3] ) _lowerCamelCase = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: _lowerCamelCase = val[:dim, :] _lowerCamelCase = val[dim : dim * 2, :] _lowerCamelCase = val[-dim:, :] else: _lowerCamelCase = val[:dim] _lowerCamelCase = val[ dim : dim * 2 ] _lowerCamelCase = val[-dim:] else: _lowerCamelCase = val return orig_state_dict def lowerCamelCase ( UpperCamelCase : List[Any] , UpperCamelCase : Any ) -> str: _lowerCamelCase = timm.create_model(UpperCamelCase , pretrained=UpperCamelCase ) timm_model.eval() _lowerCamelCase = get_swinva_config(UpperCamelCase ) _lowerCamelCase = SwinvaForImageClassification(UpperCamelCase ) model.eval() _lowerCamelCase = convert_state_dict(timm_model.state_dict() , UpperCamelCase ) model.load_state_dict(UpperCamelCase ) _lowerCamelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg' _lowerCamelCase = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swinva_name.replace('_' , '-' ) ) ) _lowerCamelCase = Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw ) _lowerCamelCase = image_processor(images=UpperCamelCase , return_tensors='pt' ) _lowerCamelCase = timm_model(inputs['pixel_values'] ) _lowerCamelCase = model(**UpperCamelCase ).logits assert torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-3 ) print(F"""Saving model {swinva_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(UpperCamelCase ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(UpperCamelCase ) model.push_to_hub( repo_path_or_name=Path(UpperCamelCase , UpperCamelCase ) , organization='nandwalritik' , commit_message='Add model' , ) if __name__ == "__main__": A = argparse.ArgumentParser() # Required parameters parser.add_argument( '--swinv2_name', default='swinv2_tiny_patch4_window8_256', type=str, help='Name of the Swinv2 timm model you\'d like to 
convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) A = parser.parse_args() convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
714
A = 8.3_14_45_98 def lowerCamelCase ( UpperCamelCase : float , UpperCamelCase : float ) -> float: if temperature < 0: raise Exception('Temperature cannot be less than 0 K' ) if molar_mass <= 0: raise Exception('Molar mass cannot be less than or equal to 0 kg/mol' ) else: return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5 if __name__ == "__main__": import doctest # run doctest doctest.testmod() # example A = 3_0_0 A = 2_8 A = rms_speed_of_molecule(temperature, molar_mass) print(F'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
234
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _a : Any = { "configuration_instructblip": [ "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "InstructBlipConfig", "InstructBlipQFormerConfig", "InstructBlipVisionConfig", ], "processing_instructblip": ["InstructBlipProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : Tuple = [ "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "InstructBlipQFormerModel", "InstructBlipPreTrainedModel", "InstructBlipForConditionalGeneration", "InstructBlipVisionModel", ] if TYPE_CHECKING: from .configuration_instructblip import ( INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig, ) from .processing_instructblip import InstructBlipProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_instructblip import ( INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST, InstructBlipForConditionalGeneration, InstructBlipPreTrainedModel, InstructBlipQFormerModel, InstructBlipVisionModel, ) else: import sys _a : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
168
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy as np import tensorflow as tf from transformers import ( TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, FlaubertConfig, TFFlaubertForMultipleChoice, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForSequenceClassification, TFFlaubertForTokenClassification, TFFlaubertModel, TFFlaubertWithLMHeadModel, ) class __A : def __init__( self , UpperCamelCase_ , ): __UpperCAmelCase : Any = parent __UpperCAmelCase : Dict = 13 __UpperCAmelCase : Tuple = 7 __UpperCAmelCase : List[Any] = True __UpperCAmelCase : Tuple = True __UpperCAmelCase : Optional[Any] = True __UpperCAmelCase : Optional[Any] = True __UpperCAmelCase : List[Any] = True __UpperCAmelCase : Optional[int] = False __UpperCAmelCase : Union[str, Any] = False __UpperCAmelCase : List[Any] = False __UpperCAmelCase : Union[str, Any] = 2 __UpperCAmelCase : Dict = 99 __UpperCAmelCase : Dict = 0 __UpperCAmelCase : List[Any] = 32 __UpperCAmelCase : Any = 2 __UpperCAmelCase : str = 4 __UpperCAmelCase : List[Any] = 0.1 __UpperCAmelCase : Optional[int] = 0.1 __UpperCAmelCase : Union[str, Any] = 5_12 __UpperCAmelCase : int = 16 __UpperCAmelCase : List[Any] = 2 __UpperCAmelCase : int = 0.0_2 __UpperCAmelCase : Optional[int] = 3 __UpperCAmelCase : List[str] = 4 __UpperCAmelCase : List[Any] = "last" __UpperCAmelCase : List[str] = True __UpperCAmelCase : str = None __UpperCAmelCase : Any = 0 def _snake_case ( self ): __UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] , 
dtype=tf.floataa ) __UpperCAmelCase : Union[str, Any] = None if self.use_input_lengths: __UpperCAmelCase : str = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length __UpperCAmelCase : Dict = None if self.use_token_type_ids: __UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) __UpperCAmelCase : Union[str, Any] = None __UpperCAmelCase : Any = None __UpperCAmelCase : Tuple = None if self.use_labels: __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa ) __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase : str = FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , ) return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ): __UpperCAmelCase : Dict = TFFlaubertModel(config=UpperCamelCase_ ) __UpperCAmelCase : int = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids} 
__UpperCAmelCase : List[str] = model(UpperCamelCase_ ) __UpperCAmelCase : Union[str, Any] = [input_ids, input_mask] __UpperCAmelCase : List[Any] = model(UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ): __UpperCAmelCase : Dict = TFFlaubertWithLMHeadModel(UpperCamelCase_ ) __UpperCAmelCase : Tuple = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids} __UpperCAmelCase : Dict = model(UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ): __UpperCAmelCase : Union[str, Any] = TFFlaubertForQuestionAnsweringSimple(UpperCamelCase_ ) __UpperCAmelCase : str = {"input_ids": input_ids, "lengths": input_lengths} __UpperCAmelCase : Tuple = model(UpperCamelCase_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ): __UpperCAmelCase : Tuple = TFFlaubertForSequenceClassification(UpperCamelCase_ ) __UpperCAmelCase : List[Any] = {"input_ids": input_ids, "lengths": input_lengths} __UpperCAmelCase : str = model(UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , 
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ): __UpperCAmelCase : Optional[int] = self.num_labels __UpperCAmelCase : Dict = TFFlaubertForTokenClassification(config=UpperCamelCase_ ) __UpperCAmelCase : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} __UpperCAmelCase : Tuple = model(UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ): __UpperCAmelCase : Tuple = self.num_choices __UpperCAmelCase : Optional[int] = TFFlaubertForMultipleChoice(config=UpperCamelCase_ ) __UpperCAmelCase : Tuple = tf.tile(tf.expand_dims(UpperCamelCase_ , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : Optional[int] = tf.tile(tf.expand_dims(UpperCamelCase_ , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : List[str] = tf.tile(tf.expand_dims(UpperCamelCase_ , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : Optional[Any] = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } __UpperCAmelCase : Tuple = model(UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _snake_case ( self ): __UpperCAmelCase : int = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) : Optional[int] = config_and_inputs __UpperCAmelCase : str = { "input_ids": input_ids, "token_type_ids": token_type_ids, "langs": token_type_ids, "lengths": input_lengths, } return config, inputs_dict @require_tf class __A (__magic_name__ , __magic_name__ , 
unittest.TestCase ): snake_case :List[str] = ( ( TFFlaubertModel, TFFlaubertWithLMHeadModel, TFFlaubertForSequenceClassification, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForTokenClassification, TFFlaubertForMultipleChoice, ) if is_tf_available() else () ) snake_case :List[str] = ( (TFFlaubertWithLMHeadModel,) if is_tf_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable snake_case :Optional[Any] = ( { "feature-extraction": TFFlaubertModel, "fill-mask": TFFlaubertWithLMHeadModel, "question-answering": TFFlaubertForQuestionAnsweringSimple, "text-classification": TFFlaubertForSequenceClassification, "token-classification": TFFlaubertForTokenClassification, "zero-shot": TFFlaubertForSequenceClassification, } if is_tf_available() else {} ) snake_case :Tuple = False snake_case :Any = False def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _snake_case ( self ): __UpperCAmelCase : List[str] = TFFlaubertModelTester(self ) __UpperCAmelCase : Dict = ConfigTester(self , config_class=UpperCamelCase_ , emb_dim=37 ) def _snake_case ( self ): self.config_tester.run_common_tests() def _snake_case ( self ): __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*UpperCamelCase_ ) def _snake_case ( self ): __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*UpperCamelCase_ ) def _snake_case ( self ): __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*UpperCamelCase_ ) def _snake_case ( self ): __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*UpperCamelCase_ ) def _snake_case ( self ): __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_token_classification(*UpperCamelCase_ ) def _snake_case ( self ): __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_multiple_choice(*UpperCamelCase_ ) @slow def _snake_case ( self ): for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : Tuple = TFFlaubertModel.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) @require_tf @require_sentencepiece @require_tokenizers class __A (unittest.TestCase ): @slow def _snake_case ( self ): __UpperCAmelCase : str = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased" ) __UpperCAmelCase : Tuple = tf.convert_to_tensor( [[0, 1_58, 7_35, 25_92, 14_24, 
67_27, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !" __UpperCAmelCase : int = model(UpperCamelCase_ )[0] __UpperCAmelCase : str = tf.TensorShape((1, 8, 5_12) ) self.assertEqual(output.shape , UpperCamelCase_ ) # compare the actual values for a slice. __UpperCAmelCase : Tuple = tf.convert_to_tensor( [ [ [-1.8_7_6_8_7_7_3, -1.5_6_6_5_5_5, 0.2_7_0_7_2_4_1_8], [-1.6_9_2_0_0_3_8, -0.5_8_7_3_5_0_5, 1.9_3_2_9_5_9_9], [-2.9_5_6_3_9_8_5, -1.6_9_9_3_8_3_5, 1.7_9_7_2_0_5_2], ] ] , dtype=tf.floataa , ) self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
168
1
"""simple docstring""" import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed UpperCamelCase = """true""" def lowerCAmelCase ( UpperCamelCase_: Any , UpperCamelCase_: List[Any]=82 , UpperCamelCase_: Optional[Any]=16 ) -> Dict: '''simple docstring''' set_seed(42 ) _a = RegressionModel() _a = deepcopy(UpperCamelCase_ ) _a = RegressionDataset(length=UpperCamelCase_ ) _a = DataLoader(UpperCamelCase_ , batch_size=UpperCamelCase_ ) model.to(accelerator.device ) _a , _a = accelerator.prepare(UpperCamelCase_ , UpperCamelCase_ ) return model, ddp_model, dataloader def lowerCAmelCase ( UpperCamelCase_: Accelerator , UpperCamelCase_: Tuple=False ) -> Optional[Any]: '''simple docstring''' _a = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased" ) _a = load_dataset("glue" , "mrpc" , split="validation" ) def tokenize_function(UpperCamelCase_: Optional[int] ): _a = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ ) return outputs with accelerator.main_process_first(): _a = dataset.map( UpperCamelCase_ , batched=UpperCamelCase_ , remove_columns=["idx", "sentence1", "sentence2"] , ) _a = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(UpperCamelCase_: Tuple ): if use_longest: return tokenizer.pad(UpperCamelCase_ , padding="longest" , return_tensors="pt" ) return tokenizer.pad(UpperCamelCase_ , padding="max_length" , max_length=128 , return_tensors="pt" ) return DataLoader(UpperCamelCase_ , shuffle=UpperCamelCase_ , collate_fn=UpperCamelCase_ , batch_size=16 ) def lowerCAmelCase ( UpperCamelCase_: int , 
UpperCamelCase_: str ) -> List[str]: '''simple docstring''' _a = Accelerator(dispatch_batches=UpperCamelCase_ , split_batches=UpperCamelCase_ ) _a = get_dataloader(UpperCamelCase_ , not dispatch_batches ) _a = AutoModelForSequenceClassification.from_pretrained( "hf-internal-testing/mrpc-bert-base-cased" , return_dict=UpperCamelCase_ ) _a , _a = accelerator.prepare(UpperCamelCase_ , UpperCamelCase_ ) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def lowerCAmelCase ( UpperCamelCase_: Any , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int ) -> Optional[Any]: '''simple docstring''' _a = [] for batch in dataloader: _a , _a = batch.values() with torch.no_grad(): _a = model(UpperCamelCase_ ) _a , _a = accelerator.gather_for_metrics((logit, target) ) logits_and_targets.append((logit, target) ) _a , _a = [], [] for logit, targ in logits_and_targets: logits.append(UpperCamelCase_ ) targs.append(UpperCamelCase_ ) _a , _a = torch.cat(UpperCamelCase_ ), torch.cat(UpperCamelCase_ ) return logits, targs def lowerCAmelCase ( UpperCamelCase_: Accelerator , UpperCamelCase_: int=82 , UpperCamelCase_: Any=False , UpperCamelCase_: Optional[Any]=False , UpperCamelCase_: Dict=16 ) -> List[str]: '''simple docstring''' _a , _a , _a = get_basic_setup(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) _a , _a = generate_predictions(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) assert ( len(UpperCamelCase_ ) == num_samples ), f'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(UpperCamelCase_ )}''' def lowerCAmelCase ( UpperCamelCase_: bool = False , UpperCamelCase_: bool = False ) -> Optional[Any]: '''simple docstring''' _a = evaluate.load("glue" , "mrpc" ) _a , _a = get_mrpc_setup(UpperCamelCase_ , UpperCamelCase_ ) # First do baseline _a , _a , _a = setup["no"] model.to(UpperCamelCase_ ) model.eval() for batch in dataloader: batch.to(UpperCamelCase_ ) with torch.inference_mode(): _a 
= model(**UpperCamelCase_ ) _a = outputs.logits.argmax(dim=-1 ) metric.add_batch(predictions=UpperCamelCase_ , references=batch["labels"] ) _a = metric.compute() # Then do distributed _a , _a , _a = setup["ddp"] model.eval() for batch in dataloader: with torch.inference_mode(): _a = model(**UpperCamelCase_ ) _a = outputs.logits.argmax(dim=-1 ) _a = batch["labels"] _a , _a = accelerator.gather_for_metrics((preds, references) ) metric.add_batch(predictions=UpperCamelCase_ , references=UpperCamelCase_ ) _a = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key] , distributed[key] ), f'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n''' def lowerCAmelCase ( ) -> Optional[Any]: '''simple docstring''' _a = Accelerator(split_batches=UpperCamelCase_ , dispatch_batches=UpperCamelCase_ ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on the GPU or TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print("**Testing gather_for_metrics**" ) for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' ) test_mrpc(UpperCamelCase_ , UpperCamelCase_ ) accelerator.state._reset_state() if accelerator.is_local_main_process: print("**Test torch metrics**" ) for split_batches in [True, False]: for dispatch_batches in [True, False]: _a = Accelerator(split_batches=UpperCamelCase_ , dispatch_batches=UpperCamelCase_ ) if accelerator.is_local_main_process: print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' ) 
test_torch_metrics(UpperCamelCase_ , 99 ) accelerator.state._reset_state() if accelerator.is_local_main_process: print("**Test last batch is not dropped when perfectly divisible**" ) _a = Accelerator() test_torch_metrics(UpperCamelCase_ , 512 ) accelerator.state._reset_state() def lowerCAmelCase ( UpperCamelCase_: Optional[int] ) -> List[str]: '''simple docstring''' main() if __name__ == "__main__": main()
612
"""simple docstring""" import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) UpperCamelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class lowercase_ : A__ : str = field( default=_UpperCAmelCase, metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(_UpperCAmelCase )} ) A__ : str = field( default=_UpperCAmelCase, metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} ) A__ : int = field( default=128, metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) }, ) A__ : int = field( default=128, metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''}, ) A__ : int = field( default=64, metadata={ '''help''': ( '''The maximum number of tokens for the question. Questions longer than this will ''' '''be truncated to this length.''' ) }, ) A__ : int = field( default=30, metadata={ '''help''': ( '''The maximum length of an answer that can be generated. 
This is needed because the start ''' '''and end predictions are not conditioned on one another.''' ) }, ) A__ : bool = field( default=_UpperCAmelCase, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) A__ : bool = field( default=_UpperCAmelCase, metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} ) A__ : float = field( default=0.0, metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} ) A__ : int = field( default=20, metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} ) A__ : int = field( default=0, metadata={ '''help''': ( '''language id of input for language-specific xlm models (see''' ''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)''' ) }, ) A__ : int = field(default=1, metadata={'''help''': '''multiple threads for converting example to features'''} ) class lowercase_ (_UpperCAmelCase ): A__ : Tuple = '''train''' A__ : List[Any] = '''dev''' class lowercase_ (_UpperCAmelCase ): A__ : SquadDataTrainingArguments A__ : List[SquadFeatures] A__ : Split A__ : bool def __init__( self , a_ , a_ , a_ = None , a_ = Split.train , a_ = False , a_ = None , a_ = "pt" , ) ->List[str]: '''simple docstring''' _a = args _a = is_language_sensitive _a = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(a_ , a_ ): try: _a = Split[mode] except KeyError: raise KeyError("mode is not a valid split name" ) _a = mode # Load data features from cache or dataset file _a = "v2" if args.version_2_with_negative else "v1" _a = os.path.join( cache_dir if cache_dir is not None else args.data_dir , f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''' , ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
_a = cached_features_file + ".lock" with FileLock(a_ ): if os.path.exists(a_ ) and not args.overwrite_cache: _a = time.time() _a = torch.load(a_ ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. _a = self.old_features["features"] _a = self.old_features.get("dataset" , a_ ) _a = self.old_features.get("examples" , a_ ) logger.info( f'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( f'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in''' " future run" ) else: if mode == Split.dev: _a = self.processor.get_dev_examples(args.data_dir ) else: _a = self.processor.get_train_examples(args.data_dir ) _a , _a = squad_convert_examples_to_features( examples=self.examples , tokenizer=a_ , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=a_ , ) _a = time.time() torch.save( {"features": self.features, "dataset": self.dataset, "examples": self.examples} , a_ , ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. 
logger.info( f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' ) def __len__( self ) ->Optional[int]: '''simple docstring''' return len(self.features ) def __getitem__( self , a_ ) ->Dict[str, torch.Tensor]: '''simple docstring''' _a = self.features[i] _a = torch.tensor(feature.input_ids , dtype=torch.long ) _a = torch.tensor(feature.attention_mask , dtype=torch.long ) _a = torch.tensor(feature.token_type_ids , dtype=torch.long ) _a = torch.tensor(feature.cls_index , dtype=torch.long ) _a = torch.tensor(feature.p_mask , dtype=torch.float ) _a = torch.tensor(feature.is_impossible , dtype=torch.float ) _a = { "input_ids": input_ids, "attention_mask": attention_mask, "token_type_ids": token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({"cls_index": cls_index, "p_mask": p_mask} ) if self.args.version_2_with_negative: inputs.update({"is_impossible": is_impossible} ) if self.is_language_sensitive: inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: _a = torch.tensor(feature.start_position , dtype=torch.long ) _a = torch.tensor(feature.end_position , dtype=torch.long ) inputs.update({"start_positions": start_positions, "end_positions": end_positions} ) return inputs
612
1
"""Tokenization tests for XGLM (slow SentencePiece and fast tokenizers)."""
import pickle
import shutil
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class __A(TokenizerTesterMixin, unittest.TestCase):
    # Attribute names are required by TokenizerTesterMixin.
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 10_08)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 10_08)

    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )
        # Round-tripping maps out-of-vocab pieces ("9", "é") to <unk>.
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 3_12_27, 44_47, 35]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [2, 10_18, 67, 11, 19_88, 26_17, 56_31, 2_78, 11, 34_07, 48, 7_16_30, 2_80_85, 4, 32_34, 1_57, 13, 6, 5, 6, 4, 35_26, 7_68, 15, 6_59, 57, 2_98, 39_83, 8_64, 1_29, 21, 6, 5, 1_36_75, 3_77, 6_52, 75_80, 1_03_41, 1_55, 28_17, 4_22, 16_66, 7, 16_74, 53, 1_13, 20_22_77, 1_78_92, 33, 60, 87, 4, 32_34, 1_57, 61, 26_67, 5_23_76, 19, 88, 23, 7_35]
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
            "input_ids": [
                [2, 10_88_25, 11_63, 15, 8_80_10, 4_73, 1_58_98, 1_57, 1_36_72, 18_57, 3_12, 8, 23_80_21, 11_63, 53, 1_36_72, 18_57, 3_12, 8, 5_32_83, 18_23_96, 8, 1_85_66, 16, 3_67_33, 41_01, 8, 2_30, 24_40_17, 12_25_53, 7, 15, 13_25_97, 4, 2_93, 1_25_11, 76_10, 4, 34_14, 13_25_97, 9, 4, 3_23_61, 3_62, 4, 7_34, 2_85_12, 3_25_69, 18, 4, 3_23_61, 2_60_96, 1_49_82, 73, 1_87_15, 2_14_33, 23_52_61, 15, 4_92, 1_24_27, 16, 53, 1_87_15, 2_14_33, 6_54_54, 15, 2_36_59, 5_63, 16, 2_78, 5_97, 28_43, 5_95, 79_31, 18_23_96, 6_41_86, 22, 8_86, 5_95, 13_29_81, 53, 2_55_40, 34_49, 4_39_82, 3_99_01, 59_51, 8_78, 3_30, 4, 2_76_94, 8_02_69, 3_12, 53, 65_17, 1_17_80, 6_11, 2_04_08, 5],
                [2, 6, 13_25_97, 67, 4_28_97, 33, 5_92, 8, 16_37_29, 2_55_40, 3_61, 13_69_97, 10_95_14, 17_32_30, 7, 5_01, 60, 10_29_13, 1_96, 56_31, 2_35, 6_32_43, 4_73, 6, 23_17_57, 74, 52_77, 79_05, 53, 30_95, 3_73_17, 22, 4_54, 18_38_74, 5],
                [2, 2_68, 3_12_98, 4_65_30, 6, 13_29_35, 4_38_31, 7, 5_97, 32, 24, 36_88, 98_65, 5],
            ],
            # One mask bit per token of the corresponding sequence above.
            "attention_mask": [[1] * 98, [1] * 35, [1] * 14],
        }  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/xglm-564M",
            padding=False,
        )
21
"""simple docstring""" A = 8.31_4462 # Unit - J mol-1 K-1 def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: float , lowerCamelCase_: float , lowerCamelCase_: float ): """simple docstring""" if moles < 0 or kelvin < 0 or volume < 0: raise ValueError("Invalid inputs. Enter positive value." ) return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: float , lowerCamelCase_: float , lowerCamelCase_: float ): """simple docstring""" if moles < 0 or kelvin < 0 or pressure < 0: raise ValueError("Invalid inputs. Enter positive value." ) return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure if __name__ == "__main__": from doctest import testmod testmod()
449
0
"""Unit tests for the 0/1 knapsack solver ``knapsack.knapsack``.

The obfuscated source named all three methods ``snake_case`` (duplicates
shadowed each other, and the missing ``test`` prefix meant unittest ran
nothing) and read locals under a different name than they were assigned;
distinct ``test_*`` names and consistent locals restored.
"""
import unittest

from knapsack import knapsack as k


class snake_case__(unittest.TestCase):
    def test_base_case(self):
        """Zero capacity or zero-value items yield zero profit."""
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        """Capacity 3 fits weights 2+1 for the best value 2+3=5."""
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        """Classic example: capacity 50 takes items 2 and 3 for value 220."""
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)


if __name__ == "__main__":
    unittest.main()
81
# NOTE(review): machine-mangled copy of a GPT-2 / OPT tokenizer test module.
# It does NOT run as written:
#   * ``SCREAMING_SNAKE_CASE`` is used as a variable throughout but never
#     assigned -> NameError;
#   * every local is rebound to the placeholder ``lowercase__`` while later
#     statements still read the original (now unbound) names such as
#     ``tokenizer``, ``tokens``, ``out_s``;
#   * the mixin base ``_UpperCamelCase`` and the annotation names
#     (Any, Dict, Union, ...) are never imported.
# Code is reproduced as-is (comments only added, re-flowed onto proper lines);
# recover the real identifiers from the upstream transformers test file
# before attempting to run it.
import json
import os
import unittest

from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class snake_case__(_UpperCamelCase , unittest.TestCase ):
    """Tokenization tests for the slow/fast GPT-2 tokenizer pair (mangled)."""

    # TokenizerTesterMixin configuration; names were all mangled to
    # ``lowercase_`` so only the last assignment survives -- presumably these
    # were distinct attributes (tokenizer_class, rust_tokenizer_class, ...).
    lowercase_ = GPTaTokenizer
    lowercase_ = GPTaTokenizerFast
    lowercase_ = True
    lowercase_ = {"""add_prefix_space""": True}
    lowercase_ = False

    def snake_case ( self : Any ):
        # setUp: writes a tiny BPE vocab/merges fixture to a temp dir.
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        lowercase__ : Union[str, Any] = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        lowercase__ : Optional[Any] = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
        lowercase__ : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        lowercase__ : List[str] = {"unk_token": "<unk>"}
        # NOTE(review): ``self.vocab_file`` / ``self.merges_file`` are read
        # below but never assigned here -- the attribute assignments were
        # mangled into the ``lowercase__`` bindings above.
        lowercase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        lowercase__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(SCREAMING_SNAKE_CASE ) )

    def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : int ):
        # Slow-tokenizer factory; ``kwargs`` is unbound (param was renamed).
        kwargs.update(self.special_tokens_map )
        return GPTaTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )

    def snake_case ( self : Dict , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
        # Fast-tokenizer factory; same unbound-``kwargs`` problem as above.
        kwargs.update(self.special_tokens_map )
        return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )

    def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Dict ):
        # get_input_output_texts; returns unbound names (mangled locals).
        lowercase__ : List[str] = "lower newer"
        lowercase__ : Optional[Any] = "lower newer"
        return input_text, output_text

    def snake_case ( self : Any ):
        # Full-tokenizer round trip: tokenize then convert tokens to ids.
        lowercase__ : Dict = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        lowercase__ : Dict = "lower newer"
        lowercase__ : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        lowercase__ : Optional[Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
        self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        lowercase__ : Any = tokens + [tokenizer.unk_token]
        lowercase__ : str = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )

    def snake_case ( self : Optional[Any] ):
        # Slow vs fast (rust) tokenizer parity checks.
        if not self.test_rust_tokenizer:
            return
        lowercase__ : Dict = self.get_tokenizer()
        lowercase__ : Union[str, Any] = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE )
        lowercase__ : int = "lower newer"
        # Testing tokenization
        lowercase__ : str = tokenizer.tokenize(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
        lowercase__ : int = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE )
        self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        # Testing conversion to ids without special tokens
        lowercase__ : Optional[int] = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
        lowercase__ : Dict = rust_tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
        self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        # Testing conversion to ids with special tokens
        lowercase__ : List[str] = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE )
        lowercase__ : List[str] = tokenizer.encode(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
        lowercase__ : Optional[int] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE )
        self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        # Testing the unknown token
        lowercase__ : List[Any] = tokens + [rust_tokenizer.unk_token]
        lowercase__ : Optional[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )

    def snake_case ( self : str , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass

    def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : int=15 ):
        # Padding without a pad token must raise for every encode entry point.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                lowercase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
                # Simple input
                lowercase__ : Dict = "This is a simple input"
                lowercase__ : List[str] = ["This is a simple input 1", "This is a simple input 2"]
                lowercase__ : Union[str, Any] = ("This is a simple input", "This is a pair")
                lowercase__ : Optional[int] = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
                # Simple input
                self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
                # Simple input
                self.assertRaises(
                    SCREAMING_SNAKE_CASE ,
                    tokenizer_r.batch_encode_plus ,
                    SCREAMING_SNAKE_CASE ,
                    max_length=SCREAMING_SNAKE_CASE ,
                    padding="max_length" ,
                )
                # Pair input
                self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
                # Pair input
                self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
                # Pair input
                self.assertRaises(
                    SCREAMING_SNAKE_CASE ,
                    tokenizer_r.batch_encode_plus ,
                    SCREAMING_SNAKE_CASE ,
                    max_length=SCREAMING_SNAKE_CASE ,
                    padding="max_length" ,
                )

    def snake_case ( self : Any ):
        # Padding behavior when a pad token IS configured on the slow tokenizer.
        lowercase__ : Any = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
        # Simple input
        lowercase__ : Optional[int] = "This is a simple input"
        lowercase__ : List[str] = ["This is a simple input looooooooong", "This is a simple input"]
        lowercase__ : List[Any] = ("This is a simple input", "This is a pair")
        lowercase__ : Optional[Any] = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]
        lowercase__ : Any = tokenizer.pad_token_id
        lowercase__ : Dict = tokenizer(SCREAMING_SNAKE_CASE , padding="max_length" , max_length=30 , return_tensors="np" )
        lowercase__ : List[str] = tokenizer(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncate=SCREAMING_SNAKE_CASE , return_tensors="np" )
        lowercase__ : List[str] = tokenizer(*SCREAMING_SNAKE_CASE , padding="max_length" , max_length=60 , return_tensors="np" )
        lowercase__ : List[str] = tokenizer(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncate=SCREAMING_SNAKE_CASE , return_tensors="np" )
        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
        self.assertTrue(pad_token_id in out_s["input_ids"] )
        self.assertTrue(0 in out_s["attention_mask"] )
        # s2
        # test automatic padding
        self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
        self.assertFalse(0 in out_sa["attention_mask"][0] )
        # short slice does have padding
        self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
        self.assertTrue(0 in out_sa["attention_mask"][1] )
        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
        self.assertTrue(pad_token_id in out_p["input_ids"] )
        self.assertTrue(0 in out_p["attention_mask"] )
        # p2
        # test automatic padding pair
        self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
        self.assertFalse(0 in out_pa["attention_mask"][0] )
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
        self.assertTrue(0 in out_pa["attention_mask"][1] )

    def snake_case ( self : str ):
        # Custom BOS token ("$$$") prepended when add_bos_token is set.
        lowercase__ : List[str] = "$$$"
        lowercase__ : Dict = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=SCREAMING_SNAKE_CASE , add_bos_token=SCREAMING_SNAKE_CASE )
        lowercase__ : Optional[int] = "This is a simple input"
        lowercase__ : Dict = ["This is a simple input 1", "This is a simple input 2"]
        lowercase__ : Optional[int] = tokenizer.bos_token_id
        lowercase__ : List[Any] = tokenizer(SCREAMING_SNAKE_CASE )
        lowercase__ : int = tokenizer(SCREAMING_SNAKE_CASE )
        self.assertEqual(out_s.input_ids[0] , SCREAMING_SNAKE_CASE )
        self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
        lowercase__ : List[Any] = tokenizer.decode(out_s.input_ids )
        lowercase__ : List[str] = tokenizer.batch_decode(out_sa.input_ids )
        self.assertEqual(decode_s.split()[0] , SCREAMING_SNAKE_CASE )
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )

    def snake_case ( self : Optional[int] ):
        # Intentionally empty (upstream placeholder).
        pass

    def snake_case ( self : Tuple ):
        # special_tokens_mask must align with the encoded pair sequence.
        # TODO: change to self.get_tokenizers() when the fast version is implemented
        lowercase__ : int = [self.get_tokenizer(do_lower_case=SCREAMING_SNAKE_CASE , add_bos_token=SCREAMING_SNAKE_CASE )]
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                lowercase__ : str = "Encode this."
                lowercase__ : List[Any] = "This one too please."
                lowercase__ : Dict = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
                encoded_sequence += tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
                lowercase__ : Dict = tokenizer.encode_plus(
                    SCREAMING_SNAKE_CASE ,
                    SCREAMING_SNAKE_CASE ,
                    add_special_tokens=SCREAMING_SNAKE_CASE ,
                    return_special_tokens_mask=SCREAMING_SNAKE_CASE ,
                )
                lowercase__ : Tuple = encoded_sequence_dict["input_ids"]
                lowercase__ : int = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(SCREAMING_SNAKE_CASE ) , len(SCREAMING_SNAKE_CASE ) )
                lowercase__ : List[str] = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(SCREAMING_SNAKE_CASE )
                ]
                lowercase__ : Any = [x for x in filtered_sequence if x is not None]
                self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )


@require_tokenizers
class snake_case__(unittest.TestCase ):
    """OPT tokenizer regression tests (mangled); needs network/model access."""

    def snake_case ( self : Union[str, Any] ):
        # Encoding must survive a save_pretrained / from_pretrained round trip.
        # More context:
        # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
        # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
        # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
        lowercase__ : Any = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=SCREAMING_SNAKE_CASE )
        lowercase__ : Optional[Any] = "A photo of a cat"
        lowercase__ : Tuple = tokenizer.encode(
            SCREAMING_SNAKE_CASE ,
        )
        self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] )
        tokenizer.save_pretrained("test_opt" )
        lowercase__ : int = AutoTokenizer.from_pretrained("./test_opt" )
        lowercase__ : Dict = tokenizer.encode(
            SCREAMING_SNAKE_CASE ,
        )
        self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] )

    def snake_case ( self : Union[str, Any] ):
        # Slow-tokenizer path must produce the same ids.
        lowercase__ : Any = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=SCREAMING_SNAKE_CASE )
        lowercase__ : int = "A photo of a cat"
        lowercase__ : Tuple = tokenizer.encode(
            SCREAMING_SNAKE_CASE ,
        )
        # Same as above
        self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] )

    @unittest.skip("This test is failing because of a bug in the fast tokenizer" )
    def snake_case ( self : Tuple ):
        # Overridden BOS token should be reflected in encoded ids and survive
        # a save/load round trip (currently skipped upstream).
        lowercase__ : str = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=SCREAMING_SNAKE_CASE )
        lowercase__ : Optional[Any] = "bos"
        lowercase__ : List[Any] = tokenizer.get_vocab()["bos"]
        lowercase__ : Optional[Any] = "A photo of a cat"
        lowercase__ : Union[str, Any] = tokenizer.encode(
            SCREAMING_SNAKE_CASE ,
        )
        # We changed the bos token
        self.assertEqual(SCREAMING_SNAKE_CASE , [31_957, 250, 1_345, 9, 10, 4_758] )
        tokenizer.save_pretrained("./tok" )
        lowercase__ : Any = AutoTokenizer.from_pretrained("./tok" )
        self.assertTrue(tokenizer.is_fast )
        lowercase__ : Tuple = tokenizer.encode(
            SCREAMING_SNAKE_CASE ,
        )
        self.assertEqual(SCREAMING_SNAKE_CASE , [31_957, 250, 1_345, 9, 10, 4_758] )
81
1
"""BlenderbotSmall model configuration and its ONNX export configuration
(machine-mangled copy)."""
# NOTE(review): this module does NOT run as written:
#   * every ``def`` repeats the parameter name ``a__`` -> SyntaxError;
#   * locals/attributes are all rebound to ``__magic_name__`` /
#     ``__SCREAMING_SNAKE_CASE`` while right-hand sides still reference the
#     original (now unbound) identifiers (``vocab_size``, ``decoder_inputs``,
#     ``common_inputs``, ...);
#   * the base class ``__a`` is undefined (presumably PretrainedConfig and
#     OnnxSeqaSeqConfigWithPast respectively -- confirm against upstream).
# Reproduced verbatim (comments only, re-flowed onto proper lines).
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging


# Both module-level names were mangled to ``_lowerCAmelCase``; the second
# assignment (the config map) shadows the logger.
_lowerCAmelCase = logging.get_logger(__name__)

_lowerCAmelCase = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}


class _SCREAMING_SNAKE_CASE ( __a ):
    """Configuration class for BlenderbotSmall (mangled)."""

    # All three class attributes share one mangled name; upstream these were
    # model_type, keys_to_ignore_at_inference and attribute_map.
    __SCREAMING_SNAKE_CASE :int = """blenderbot-small"""
    __SCREAMING_SNAKE_CASE :List[str] = ["""past_key_values"""]
    __SCREAMING_SNAKE_CASE :int = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}

    def __init__( self : Optional[int] , a__ : Any=5_0265 , a__ : Any=512 , a__ : Dict=8 , a__ : int=2048 , a__ : Optional[Any]=16 , a__ : Union[str, Any]=8 , a__ : Optional[Any]=2048 , a__ : Optional[int]=16 , a__ : str=0.0 , a__ : Dict=0.0 , a__ : Tuple=True , a__ : Any=True , a__ : List[Any]="gelu" , a__ : Union[str, Any]=512 , a__ : List[str]=0.1 , a__ : Any=0.0 , a__ : int=0.0 , a__ : Tuple=0.02 , a__ : int=1 , a__ : str=False , a__ : Optional[int]=0 , a__ : List[Any]=1 , a__ : Any=2 , a__ : int=2 , **a__ : List[Any] , ):
        # Stores the architecture hyper-parameters; the RHS names below are
        # the original parameter names, now unbound.
        __magic_name__ = vocab_size
        __magic_name__ = max_position_embeddings
        __magic_name__ = d_model
        __magic_name__ = encoder_ffn_dim
        __magic_name__ = encoder_layers
        __magic_name__ = encoder_attention_heads
        __magic_name__ = decoder_ffn_dim
        __magic_name__ = decoder_layers
        __magic_name__ = decoder_attention_heads
        __magic_name__ = dropout
        __magic_name__ = attention_dropout
        __magic_name__ = activation_dropout
        __magic_name__ = activation_function
        __magic_name__ = init_std
        __magic_name__ = encoder_layerdrop
        __magic_name__ = decoder_layerdrop
        __magic_name__ = use_cache
        __magic_name__ = encoder_layers
        __magic_name__ = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=a__ ,
            bos_token_id=a__ ,
            eos_token_id=a__ ,
            is_encoder_decoder=a__ ,
            decoder_start_token_id=a__ ,
            forced_eos_token_id=a__ ,
            **a__ ,
        )


class _SCREAMING_SNAKE_CASE ( __a ):
    """ONNX export configuration for BlenderbotSmall (mangled); second class
    shadows the first at module level because both share one mangled name."""

    @property
    def snake_case__ ( self : Optional[int] ):
        # ``inputs`` property: dynamic-axis spec per task.
        if self.task in ["default", "seq2seq-lm"]:
            __magic_name__ = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ]
            )
            if self.use_past:
                __magic_name__ = {0: '''batch'''}
                __magic_name__ = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
            else:
                __magic_name__ = {0: '''batch''', 1: '''decoder_sequence'''}
                __magic_name__ = {0: '''batch''', 1: '''decoder_sequence'''}
            if self.use_past:
                self.fill_with_past_key_values_(a__ , direction='''inputs''' )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            __magic_name__ = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ]
            )
            if self.use_past:
                __magic_name__ , __magic_name__ = self.num_layers
                for i in range(a__ ):
                    __magic_name__ = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    __magic_name__ = {0: '''batch''', 2: '''past_sequence + sequence'''}
        else:
            __magic_name__ = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
                    ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
                ]
            )
        return common_inputs

    @property
    def snake_case__ ( self : Dict ):
        # ``outputs`` property: delegates to the parent, then patches
        # past_key_values axes for the non-seq2seq tasks.
        if self.task in ["default", "seq2seq-lm"]:
            __magic_name__ = super().outputs
        else:
            __magic_name__ = super(a__ , self ).outputs
            if self.use_past:
                __magic_name__ , __magic_name__ = self.num_layers
                for i in range(a__ ):
                    __magic_name__ = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    __magic_name__ = {0: '''batch''', 2: '''past_sequence + sequence'''}
        return common_outputs

    def snake_case__ ( self : str , a__ : PreTrainedTokenizer , a__ : int = -1 , a__ : int = -1 , a__ : bool = False , a__ : Optional[TensorType] = None , ):
        # Dummy-input generator for default / seq2seq-lm export.
        __magic_name__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            a__ , a__ , a__ , a__ , a__ )
        # Generate decoder inputs
        __magic_name__ = seq_length if not self.use_past else 1
        __magic_name__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            a__ , a__ , a__ , a__ , a__ )
        __magic_name__ = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
        __magic_name__ = dict(**a__ , **a__ )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch

                __magic_name__ , __magic_name__ = common_inputs['''input_ids'''].shape
                __magic_name__ = common_inputs['''decoder_input_ids'''].shape[1]
                __magic_name__ , __magic_name__ = self.num_attention_heads
                __magic_name__ = (
                    batch,
                    num_encoder_attention_heads,
                    encoder_seq_length,
                    self._config.hidden_size // num_encoder_attention_heads,
                )
                __magic_name__ = decoder_seq_length + 3
                __magic_name__ = (
                    batch,
                    num_decoder_attention_heads,
                    decoder_past_length,
                    self._config.hidden_size // num_decoder_attention_heads,
                )
                __magic_name__ = torch.cat(
                    [common_inputs['''decoder_attention_mask'''], torch.ones(a__ , a__ )] , dim=1 )
                __magic_name__ = []
                # If the number of encoder and decoder layers are present in the model configuration, both are considered
                __magic_name__ , __magic_name__ = self.num_layers
                __magic_name__ = min(a__ , a__ )
                __magic_name__ = max(a__ , a__ ) - min_num_layers
                __magic_name__ = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
                for _ in range(a__ ):
                    common_inputs["past_key_values"].append(
                        (
                            torch.zeros(a__ ),
                            torch.zeros(a__ ),
                            torch.zeros(a__ ),
                            torch.zeros(a__ ),
                        )
                    )
                # TODO: test this.
                __magic_name__ = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
                for _ in range(a__ , a__ ):
                    common_inputs["past_key_values"].append((torch.zeros(a__ ), torch.zeros(a__ )) )
        return common_inputs

    def snake_case__ ( self : Dict , a__ : PreTrainedTokenizer , a__ : int = -1 , a__ : int = -1 , a__ : bool = False , a__ : Optional[TensorType] = None , ):
        # Dummy-input generator for causal-lm export.
        __magic_name__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            a__ , a__ , a__ , a__ , a__ )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch

                __magic_name__ , __magic_name__ = common_inputs['''input_ids'''].shape
                # Not using the same length for past_key_values
                __magic_name__ = seqlen + 2
                __magic_name__ , __magic_name__ = self.num_layers
                __magic_name__ , __magic_name__ = self.num_attention_heads
                __magic_name__ = (
                    batch,
                    num_encoder_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // num_encoder_attention_heads,
                )
                __magic_name__ = common_inputs['''attention_mask'''].dtype
                __magic_name__ = torch.cat(
                    [common_inputs['''attention_mask'''], torch.ones(a__ , a__ , dtype=a__ )] , dim=1 )
                __magic_name__ = [
                    (torch.zeros(a__ ), torch.zeros(a__ )) for _ in range(a__ )
                ]
        return common_inputs

    def snake_case__ ( self : Optional[int] , a__ : PreTrainedTokenizer , a__ : int = -1 , a__ : int = -1 , a__ : bool = False , a__ : Optional[TensorType] = None , ):
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        __magic_name__ = compute_effective_axis_dimension(
            a__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        __magic_name__ = tokenizer.num_special_tokens_to_add(a__ )
        __magic_name__ = compute_effective_axis_dimension(
            a__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=a__ )
        # Generate dummy inputs according to compute batch and sequence
        __magic_name__ = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
        __magic_name__ = dict(tokenizer(a__ , return_tensors=a__ ) )
        return common_inputs

    def snake_case__ ( self : Tuple , a__ : PreTrainedTokenizer , a__ : int = -1 , a__ : int = -1 , a__ : bool = False , a__ : Optional[TensorType] = None , ):
        # Task dispatcher for generate_dummy_inputs.
        if self.task in ["default", "seq2seq-lm"]:
            __magic_name__ = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                a__ , batch_size=a__ , seq_length=a__ , is_pair=a__ , framework=a__ )
        elif self.task == "causal-lm":
            __magic_name__ = self._generate_dummy_inputs_for_causal_lm(
                a__ , batch_size=a__ , seq_length=a__ , is_pair=a__ , framework=a__ )
        else:
            __magic_name__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                a__ , batch_size=a__ , seq_length=a__ , is_pair=a__ , framework=a__ )
        return common_inputs

    def snake_case__ ( self : Tuple , a__ : Optional[int] , a__ : Tuple , a__ : List[str] , a__ : Union[str, Any] ):
        # Flattens past_key_values; seq2seq tasks use the seq2seq flattening,
        # others fall back to OnnxConfigWithPast's version.
        if self.task in ["default", "seq2seq-lm"]:
            __magic_name__ = super()._flatten_past_key_values_(a__ , a__ , a__ , a__ )
        else:
            __magic_name__ = super(a__ , self )._flatten_past_key_values_(
                a__ , a__ , a__ , a__ )
432
"""SageMaker single-node training integration test for transformers
(machine-mangled copy)."""
# NOTE(review): mangled and non-runnable as written: locals are rebound to
# ``__magic_name__`` while later statements read the original (now unbound)
# names (``estimator``, ``result_metrics_df``, ...), and ``a__`` is used
# inside bodies where it is not a parameter.  Only runs when TEST_SAGEMAKER
# is set and a live SageMaker session is available.  Reproduced verbatim
# (comments only, re-flowed onto proper lines).
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized_class

from . import is_sagemaker_available


if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True ,
    reason="""Skipping test because should only be run when releasing minor transformers version""" ,
)
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
    [
        {
            """framework""": """pytorch""",
            """script""": """run_glue.py""",
            """model_name_or_path""": """distilbert-base-cased""",
            """instance_type""": """ml.g4dn.xlarge""",
            """results""": {"""train_runtime""": 650, """eval_accuracy""": 0.6, """eval_loss""": 0.9},
        },
        {
            """framework""": """tensorflow""",
            """script""": """run_tf.py""",
            """model_name_or_path""": """distilbert-base-cased""",
            """instance_type""": """ml.g4dn.xlarge""",
            """results""": {"""train_runtime""": 600, """eval_accuracy""": 0.3, """eval_loss""": 0.9},
        },
    ] )
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Runs one single-instance training job per parameterized framework and
    checks runtime / accuracy / loss KPIs against expected thresholds."""

    def snake_case__ ( self : int ):
        # setUp: copies the example training script into the test workspace
        # for the pytorch variant; ``check=a__`` is unbound (mangled).
        if self.framework == "pytorch":
            subprocess.run(
                F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() ,
                encoding='''utf-8''' ,
                check=a__ ,
            )
        assert hasattr(self , '''env''' )

    def snake_case__ ( self : str , a__ : int=1 ):
        # creates estimator
        return HuggingFace(
            entry_point=self.script ,
            source_dir=self.env.test_path ,
            role=self.env.role ,
            image_uri=self.env.image_uri ,
            base_job_name=F'''{self.env.base_job_name}-single''' ,
            instance_count=a__ ,
            instance_type=self.instance_type ,
            debugger_hook_config=a__ ,
            hyperparameters={**self.env.hyperparameters, '''model_name_or_path''': self.model_name_or_path} ,
            metric_definitions=self.env.metric_definitions ,
            py_version='''py36''' ,
        )

    def snake_case__ ( self : Optional[int] , a__ : Tuple ):
        # Exports a job's CloudWatch metrics to CSV; ``job_name`` in the
        # f-string is unbound (the parameter was renamed to ``a__``).
        TrainingJobAnalytics(a__ ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' )

    def snake_case__ ( self : Any ):
        # create estimator
        __magic_name__ = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        __magic_name__ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        __magic_name__ = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
        __magic_name__ = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        __magic_name__ = (
            Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 99_9999 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
        assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(F'''{estimator.latest_training_job.name}.json''' , '''w''' ) as outfile:
            json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , a__ )
432
1
"""DeeBERT: BERT with early-exit "highway" classifiers after every layer.

At inference time each layer's highway classifier computes logits; if the
entropy of those logits falls below that layer's threshold, a
`HighwayException` short-circuits the forward pass (early exit).
"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
    BERT_INPUTS_DOCSTRING,
    BERT_START_DOCSTRING,
    BertEmbeddings,
    BertLayer,
    BertPooler,
    BertPreTrainedModel,
)


def entropy(x):
    """Calculate entropy of a pre-softmax logit tensor (per row of `x`)."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    # entropy of softmax(x) = log(sum exp) - (sum x*exp) / (sum exp)
    return torch.log(A) - B / A


class DeeBertEncoder(nn.Module):
    """BertEncoder variant with one highway (early-exit) classifier per layer."""

    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        # -1 disables early exit (entropy is never below -1)
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        """Set per-layer exit thresholds; a scalar applies to every layer."""
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        """Copy the main model pooler's weights into every highway pooler."""
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    # Confident enough: abort the forward pass via exception,
                    # caught by DeeBertForSequenceClassification.forward.
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)

        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits


@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """Prune heads of the model; heads_to_prune maps layer index -> head list."""
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions
        # [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]

        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[1:]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits


class HighwayException(Exception):
    """Raised by DeeBertEncoder to short-circuit the forward pass on early exit."""

    def __init__(self, message, exit_layer):
        self.message = message  # the early-exit outputs tuple
        self.exit_layer = exit_layer  # start from 1!


class BertHighway(nn.Module):
    """A module to provide a shortcut from (the output of one non-final
    BertLayer in BertEncoder) to (cross-entropy computation in
    BertForSequenceClassification).
    """

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output


@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """,
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            # Early exit fired somewhere inside the encoder.
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                #  We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    #  We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                # exclude the final highway, of course
                outputs = (sum(highway_losses[:-1]),) + outputs
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
86
"""Tests for the RoBERTa slow and fast tokenizers (BPE vocab, special tokens,
prefix-space handling, and offset-mapping behavior)."""
import itertools
import json
import os
import unittest

from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"cls_token": "<s>"}

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def roberta_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=False),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("roberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )

    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)

            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)

    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
86
1
"""Processor class for ViLT: wraps a ViltImageProcessor and a BertTokenizer(Fast)
into a single callable that produces combined text + image model inputs."""
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class ViltProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # Fall back to the deprecated `feature_extractor` kwarg if no image
        # processor was given explicitly.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Tokenize `text` and process `images`, merging both into one BatchEncoding."""
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # de-duplicate while preserving order
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
68
import argparse

import torch

from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# (parlai substring, huggingface substring) pairs, applied in order by
# rename_state_dict_key. Order matters: e.g. "encoder_attention" must be
# rewritten before the generic "attention" pattern would mangle it — hence
# both entries and the startswith() fixups below.
PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]


def rename_state_dict_key(k):
    """Map a single ParlAI checkpoint key to its Hugging Face equivalent.

    Args:
        k: original ParlAI state-dict key.

    Returns:
        The renamed key (unchanged if no pattern applies).
    """
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    """In place, rename the embedding layer norms (Blenderbot-3B layout).

    Pops each ``layernorm_embedding`` entry and re-inserts it under the
    ``layer_norm`` name used by the HF Blenderbot-3B architecture.
    """
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Convert a ParlAI Blenderbot checkpoint to HF format and save it.

    Args:
        checkpoint_path: path to the ParlAI ``.bin`` checkpoint.
        pytorch_dump_folder_path: output directory for the HF model.
        config_json_path: path to the HF Blenderbot config JSON.
    """
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if failures:
        # Surface unmapped keys instead of silently dropping them.
        logger.info("Could not map %d keys: %s", len(failures), failures)
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        # NOTE(review): upstream renames in `sd` while `mapping` is loaded —
        # kept as-is to preserve behavior; verify for 3B checkpoints.
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
68
1
from typing import Optional, Tuple, Union

import torch
from einops import rearrange, reduce

from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput


# Number of bits used to encode each (8-bit) color channel.
BITS = 8


# Bit <-> decimal conversions adapted from
# https://github.com/lucidrains/bit-diffusion/blob/main/bit_diffusion/bit_diffusion.py
def decimal_to_bits(x, bits=BITS):
    """Expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1."""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    # Extract each bit plane, then fold the bit axis into the channel axis.
    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1  # map {0, 1} -> {-1, 1}
    return bits


def bits_to_decimal(x, bits=BITS):
    """Expects bits from -1 to 1, outputs image tensor from 0 to 1."""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)


# Modified scheduler step that clamps "predicted x_0" to the bit scale
# instead of [-1, 1]; bound as `scheduler.step` by BitDiffusion.__init__.
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    """Predict the sample at the previous timestep by reversing the SDE.

    Args:
        model_output: direct output from the learned diffusion model.
        timestep: current discrete timestep in the diffusion chain.
        sample: current instance of sample being created by diffusion process.
        eta: weight of noise for added noise in diffusion step.
        use_clipped_model_output: if True, compute "corrected" model_output
            from the clipped predicted original sample (as in Glide).
        generator: random number generator for the added noise.
        return_dict: option for returning tuple rather than DDIMSchedulerOutput.

    Returns:
        DDIMSchedulerOutput (or a tuple when ``return_dict`` is False).
    """
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Notation (<variable name> -> <name in paper>):
    # - model_output -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod

    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise, also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5

    # 4. Clip "predicted x_0" to the bit scale rather than [-1, 1]
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance**0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5

    # 6. compute "direction pointing to x_t" of formula (12)
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12)
    prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)


def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    """Predict the sample at the previous timestep by reversing the SDE (DDPM variant).

    Args:
        model_output: direct output from the learned diffusion model.
        timestep: current discrete timestep in the diffusion chain.
        sample: current instance of sample being created by diffusion process.
        prediction_type: "epsilon" (predict noise) or "sample" (predict x_0).
        generator: random number generator for the added noise.
        return_dict: option for returning tuple rather than DDPMSchedulerOutput.

    Returns:
        DDPMSchedulerOutput (or a tuple when ``return_dict`` is False).
    """
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise, also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0" to the bit scale rather than [-1, 1]
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev**0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)


class BitDiffusion(DiffusionPipeline):
    """Bit diffusion pipeline: diffuses over the bit representation of images."""

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        # Monkey-patch the scheduler's step with the bit-scale-aware variant
        # before registering it as a pipeline module.
        scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        )

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        """Generate images by denoising random bit tensors.

        Returns:
            ImagePipelineOutput (or a tuple when ``return_dict`` is False).
        """
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
700
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}


class XLNetConfig(PretrainedConfig):
    """Configuration class to store the configuration of an XLNet model.

    Instantiating a configuration with the defaults yields a configuration
    similar to the xlnet-large-cased architecture. XLNet has no sequence
    length limit, hence the special max_position_embeddings property below.
    """

    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32_000,
        d_model=1_024,
        n_layer=24,
        n_head=16,
        d_inner=4_096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        """Constructs XLNetConfig."""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        # d_head is derived, so d_model must divide evenly by n_head.
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            # Deprecated alias kept for old configs/checkpoints.
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # XLNet uses relative positional encodings, so there is no hard limit.
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
307
0
import warnings

from ...utils import is_sklearn_available, requires_backends


if is_sklearn_available():
    from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    """Fraction of predictions that exactly match the labels."""
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    """Accuracy, F1 and their mean, for binary classification tasks."""
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    """Pearson/Spearman correlations and their mean, for regression tasks."""
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    """Compute the metric appropriate for the given GLUE task.

    Raises:
        KeyError: if ``task_name`` is not a known GLUE task.
    """
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    """Compute accuracy for the XNLI task.

    Raises:
        ValueError: if preds and labels have different lengths.
        KeyError: if ``task_name`` is not "xnli".
    """
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
312
import fire

from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name, save_dir, **config_kwargs):
    """Save a randomly initialized version of a model using a pretrained config.

    Args:
        config_name: which config to use, e.g. "facebook/bart-large-cnn".
        save_dir: where to save the new model and the config's tokenizer.
        **config_kwargs: extra overrides passed to AutoConfig.from_pretrained.

    Returns:
        The randomly initialized model (also written to ``save_dir``).
    """
    cfg = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(cfg)
    model.save_pretrained(save_dir)
    # Reuse the pretrained tokenizer so the saved directory is self-contained.
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model


if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version)
312
1
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __a : Optional[Any] = logging.get_logger(__name__) __a : Dict = '▁' __a : Optional[int] = {'vocab_file': 'sentencepiece.bpe.model'} __a : Union[str, Any] = { 'vocab_file': { 'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model', } } __a : Any = { 'facebook/xglm-564M': 2048, } class _SCREAMING_SNAKE_CASE ( __snake_case ): """simple docstring""" _SCREAMING_SNAKE_CASE =VOCAB_FILES_NAMES _SCREAMING_SNAKE_CASE =PRETRAINED_VOCAB_FILES_MAP _SCREAMING_SNAKE_CASE =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _SCREAMING_SNAKE_CASE =['input_ids', 'attention_mask'] def __init__( self: Union[str, Any] , __A: str , __A: Tuple="<s>" , __A: List[str]="</s>" , __A: List[Any]="</s>" , __A: str="<s>" , __A: str="<unk>" , __A: int="<pad>" , __A: Optional[Dict[str, Any]] = None , **__A: Dict , ): '''simple docstring''' a__ = {} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer a__ = 7 a__ = [F'<madeupword{i}>' for i in range(self.num_madeup_words )] a__ = kwargs.get('''additional_special_tokens''' , [] ) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , sp_model_kwargs=self.sp_model_kwargs , **__A , ) a__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__A ) ) a__ = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab a__ = 1 # Mimic fairseq token-to-id alignment for the first 4 token a__ = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} a__ = len(self.sp_model ) a__ = {F'<madeupword{i}>': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )} self.fairseq_tokens_to_ids.update(__A ) a__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self: Optional[Any] ): '''simple docstring''' a__ = self.__dict__.copy() a__ = None a__ = self.sp_model.serialized_model_proto() return state def __setstate__( self: Any , __A: Any ): '''simple docstring''' a__ = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): a__ = {} a__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def lowercase ( self: str , __A: List[int] , __A: Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.sep_token_id] + token_ids_a a__ = [self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def lowercase ( self: Optional[int] , __A: List[int] , __A: Optional[List[int]] = None , __A: bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A ) if token_ids_a is None: return [1] + ([0] * len(__A )) return [1] + ([0] * len(__A )) + [1, 1] + ([0] * len(__A )) def lowercase ( self: int , __A: List[int] , __A: Optional[List[int]] = None ): '''simple docstring''' a__ = [self.sep_token_id] if token_ids_a is None: return len(sep + token_ids_a ) * [0] return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0] @property def lowercase ( self: List[Any] ): '''simple docstring''' return len(self.sp_model ) + 
self.fairseq_offset + self.num_madeup_words def lowercase ( self: Optional[int] ): '''simple docstring''' a__ = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowercase ( self: Optional[int] , __A: str ): '''simple docstring''' return self.sp_model.encode(__A , out_type=__A ) def lowercase ( self: Dict , __A: Optional[Any] ): '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] a__ = self.sp_model.PieceToId(__A ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def lowercase ( self: Dict , __A: int ): '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def lowercase ( self: Optional[Any] , __A: int ): '''simple docstring''' a__ = ''''''.join(__A ).replace(__A , ''' ''' ).strip() return out_string def lowercase ( self: Dict , __A: str , __A: Optional[str] = None ): '''simple docstring''' if not os.path.isdir(__A ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return a__ = os.path.join( __A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __A ) elif not os.path.isfile(self.vocab_file ): with open(__A , '''wb''' ) as fi: a__ = self.sp_model.serialized_model_proto() fi.write(__A ) return (out_vocab_file,)
703
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTConfig, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __a : Optional[Any] = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE ( lowerCamelCase_): a__ = MobileViTConfig() # size of the architecture if "mobilevit_s" in mobilevit_name: a__ = [144, 192, 240] a__ = [16, 32, 64, 96, 128, 160, 640] elif "mobilevit_xs" in mobilevit_name: a__ = [96, 120, 144] a__ = [16, 32, 48, 64, 80, 96, 384] elif "mobilevit_xxs" in mobilevit_name: a__ = [64, 80, 96] a__ = [16, 16, 24, 48, 64, 80, 320] a__ = 0.05 a__ = 2.0 if mobilevit_name.startswith('''deeplabv3_'''): a__ = 512 a__ = 16 a__ = 21 a__ = '''pascal-voc-id2label.json''' else: a__ = 1000 a__ = '''imagenet-1k-id2label.json''' a__ = '''huggingface/label-files''' a__ = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type='''dataset''') , '''r''')) a__ = {int(lowerCamelCase_): v for k, v in idalabel.items()} a__ = idalabel a__ = {v: k for k, v in idalabel.items()} return config def SCREAMING_SNAKE_CASE ( lowerCamelCase_ , lowerCamelCase_=False): for i in range(1 , 6): if f'layer_{i}.' in name: a__ = name.replace(f'layer_{i}.' , f'encoder.layer.{i - 1}.') if "conv_1." in name: a__ = name.replace('''conv_1.''' , '''conv_stem.''') if ".block." in name: a__ = name.replace('''.block.''' , '''.''') if "exp_1x1" in name: a__ = name.replace('''exp_1x1''' , '''expand_1x1''') if "red_1x1" in name: a__ = name.replace('''red_1x1''' , '''reduce_1x1''') if ".local_rep.conv_3x3." in name: a__ = name.replace('''.local_rep.conv_3x3.''' , '''.conv_kxk.''') if ".local_rep.conv_1x1." in name: a__ = name.replace('''.local_rep.conv_1x1.''' , '''.conv_1x1.''') if ".norm." 
in name: a__ = name.replace('''.norm.''' , '''.normalization.''') if ".conv." in name: a__ = name.replace('''.conv.''' , '''.convolution.''') if ".conv_proj." in name: a__ = name.replace('''.conv_proj.''' , '''.conv_projection.''') for i in range(0 , 2): for j in range(0 , 4): if f'.{i}.{j}.' in name: a__ = name.replace(f'.{i}.{j}.' , f'.{i}.layer.{j}.') for i in range(2 , 6): for j in range(0 , 4): if f'.{i}.{j}.' in name: a__ = name.replace(f'.{i}.{j}.' , f'.{i}.') if "expand_1x1" in name: a__ = name.replace('''expand_1x1''' , '''downsampling_layer.expand_1x1''') if "conv_3x3" in name: a__ = name.replace('''conv_3x3''' , '''downsampling_layer.conv_3x3''') if "reduce_1x1" in name: a__ = name.replace('''reduce_1x1''' , '''downsampling_layer.reduce_1x1''') for i in range(2 , 5): if f'.global_rep.{i}.weight' in name: a__ = name.replace(f'.global_rep.{i}.weight' , '''.layernorm.weight''') if f'.global_rep.{i}.bias' in name: a__ = name.replace(f'.global_rep.{i}.bias' , '''.layernorm.bias''') if ".global_rep." in name: a__ = name.replace('''.global_rep.''' , '''.transformer.''') if ".pre_norm_mha.0." in name: a__ = name.replace('''.pre_norm_mha.0.''' , '''.layernorm_before.''') if ".pre_norm_mha.1.out_proj." in name: a__ = name.replace('''.pre_norm_mha.1.out_proj.''' , '''.attention.output.dense.''') if ".pre_norm_ffn.0." in name: a__ = name.replace('''.pre_norm_ffn.0.''' , '''.layernorm_after.''') if ".pre_norm_ffn.1." in name: a__ = name.replace('''.pre_norm_ffn.1.''' , '''.intermediate.dense.''') if ".pre_norm_ffn.4." in name: a__ = name.replace('''.pre_norm_ffn.4.''' , '''.output.dense.''') if ".transformer." in name: a__ = name.replace('''.transformer.''' , '''.transformer.layer.''') if ".aspp_layer." in name: a__ = name.replace('''.aspp_layer.''' , '''.''') if ".aspp_pool." in name: a__ = name.replace('''.aspp_pool.''' , '''.''') if "seg_head." in name: a__ = name.replace('''seg_head.''' , '''segmentation_head.''') if "segmentation_head.classifier.classifier." 
in name: a__ = name.replace('''segmentation_head.classifier.classifier.''' , '''segmentation_head.classifier.''') if "classifier.fc." in name: a__ = name.replace('''classifier.fc.''' , '''classifier.''') elif (not base_model) and ("segmentation_head." not in name): a__ = '''mobilevit.''' + name return name def SCREAMING_SNAKE_CASE ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False): if base_model: a__ = '''''' else: a__ = '''mobilevit.''' for key in orig_state_dict.copy().keys(): a__ = orig_state_dict.pop(lowerCamelCase_) if key[:8] == "encoder.": a__ = key[8:] if "qkv" in key: a__ = key.split('''.''') a__ = int(key_split[0][6:]) - 1 a__ = int(key_split[3]) a__ = model.get_submodule(f'{model_prefix}encoder.layer.{layer_num}') a__ = layer.transformer.layer[transformer_num].attention.attention.all_head_size a__ = ( f'{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.' ) if "weight" in key: a__ = val[:dim, :] a__ = val[dim : dim * 2, :] a__ = val[-dim:, :] else: a__ = val[:dim] a__ = val[dim : dim * 2] a__ = val[-dim:] else: a__ = val return orig_state_dict def SCREAMING_SNAKE_CASE ( ): a__ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' a__ = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_).raw) return im @torch.no_grad() def SCREAMING_SNAKE_CASE ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False): a__ = get_mobilevit_config(lowerCamelCase_) # load original state_dict a__ = torch.load(lowerCamelCase_ , map_location='''cpu''') # load 🤗 model if mobilevit_name.startswith('''deeplabv3_'''): a__ = MobileViTForSemanticSegmentation(lowerCamelCase_).eval() else: a__ = MobileViTForImageClassification(lowerCamelCase_).eval() a__ = convert_state_dict(lowerCamelCase_ , lowerCamelCase_) model.load_state_dict(lowerCamelCase_) # Check outputs on an image, prepared by MobileViTImageProcessor a__ = MobileViTImageProcessor(crop_size=config.image_size , 
size=config.image_size + 32) a__ = image_processor(images=prepare_img() , return_tensors='''pt''') a__ = model(**lowerCamelCase_) a__ = outputs.logits if mobilevit_name.startswith('''deeplabv3_'''): assert logits.shape == (1, 21, 32, 32) if mobilevit_name == "deeplabv3_mobilevit_s": a__ = torch.tensor( [ [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]], [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]], [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]], ]) elif mobilevit_name == "deeplabv3_mobilevit_xs": a__ = torch.tensor( [ [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]], [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]], [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]], ]) elif mobilevit_name == "deeplabv3_mobilevit_xxs": a__ = torch.tensor( [ [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]], [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]], [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]], ]) else: raise ValueError(f'Unknown mobilevit_name: {mobilevit_name}') assert torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase_ , atol=1E-4) else: assert logits.shape == (1, 1000) if mobilevit_name == "mobilevit_s": a__ = torch.tensor([-0.9866, 0.2392, -1.1241]) elif mobilevit_name == "mobilevit_xs": a__ = torch.tensor([-2.4761, -0.9399, -1.9587]) elif mobilevit_name == "mobilevit_xxs": a__ = torch.tensor([-1.9364, -1.2327, -0.4653]) else: raise ValueError(f'Unknown mobilevit_name: {mobilevit_name}') assert torch.allclose(logits[0, :3] , lowerCamelCase_ , atol=1E-4) Path(lowerCamelCase_).mkdir(exist_ok=lowerCamelCase_) print(f'Saving model {mobilevit_name} to {pytorch_dump_folder_path}') model.save_pretrained(lowerCamelCase_) print(f'Saving image 
processor to {pytorch_dump_folder_path}') image_processor.save_pretrained(lowerCamelCase_) if push_to_hub: a__ = { '''mobilevit_s''': '''mobilevit-small''', '''mobilevit_xs''': '''mobilevit-x-small''', '''mobilevit_xxs''': '''mobilevit-xx-small''', '''deeplabv3_mobilevit_s''': '''deeplabv3-mobilevit-small''', '''deeplabv3_mobilevit_xs''': '''deeplabv3-mobilevit-x-small''', '''deeplabv3_mobilevit_xxs''': '''deeplabv3-mobilevit-xx-small''', } print('''Pushing to the hub...''') a__ = model_mapping[mobilevit_name] image_processor.push_to_hub(lowerCamelCase_ , organization='''apple''') model.push_to_hub(lowerCamelCase_ , organization='''apple''') if __name__ == "__main__": __a : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--mobilevit_name', default='mobilevit_s', type=str, help=( 'Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\',' ' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.' ), ) parser.add_argument( '--checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).' ) parser.add_argument( '--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) __a : List[str] = parser.parse_args() convert_movilevit_checkpoint( args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
200
0
import copy import unittest from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, ) from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class __lowerCAmelCase : """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__=2 , lowerCamelCase__=3 , lowerCamelCase__=4 , lowerCamelCase__=2 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=99 , lowerCamelCase__=36 , lowerCamelCase__=3 , lowerCamelCase__=4 , lowerCamelCase__=37 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=512 , lowerCamelCase__=16 , lowerCamelCase__=2 , lowerCamelCase__=0.02 , lowerCamelCase__=6 , lowerCamelCase__=6 , lowerCamelCase__=3 , lowerCamelCase__=4 , lowerCamelCase__=None , lowerCamelCase__=1_000 , ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = num_channels __lowerCamelCase = image_size __lowerCamelCase = patch_size __lowerCamelCase = text_seq_length __lowerCamelCase = is_training __lowerCamelCase = use_input_mask 
__lowerCamelCase = use_token_type_ids __lowerCamelCase = use_labels __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = type_sequence_label_size __lowerCamelCase = initializer_range __lowerCamelCase = coordinate_size __lowerCamelCase = shape_size __lowerCamelCase = num_labels __lowerCamelCase = num_choices __lowerCamelCase = scope __lowerCamelCase = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) __lowerCamelCase = text_seq_length __lowerCamelCase = (image_size // patch_size) ** 2 + 1 __lowerCamelCase = self.text_seq_length + self.image_seq_length def lowercase_ ( self ) -> Dict: '''simple docstring''' __lowerCamelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) __lowerCamelCase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __lowerCamelCase = bbox[i, j, 3] __lowerCamelCase = bbox[i, j, 1] __lowerCamelCase = t if bbox[i, j, 2] < bbox[i, j, 0]: __lowerCamelCase = bbox[i, j, 2] __lowerCamelCase = bbox[i, j, 0] __lowerCamelCase = t __lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCamelCase = None if self.use_input_mask: __lowerCamelCase = random_attention_mask([self.batch_size, self.text_seq_length] ) __lowerCamelCase = None if self.use_token_type_ids: __lowerCamelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) __lowerCamelCase = None __lowerCamelCase = None if self.use_labels: 
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) __lowerCamelCase = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[str]: '''simple docstring''' __lowerCamelCase = LayoutLMvaModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() # text + image __lowerCamelCase = model(lowerCamelCase__ , pixel_values=lowerCamelCase__ ) __lowerCamelCase = model( lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ ) __lowerCamelCase = model(lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , token_type_ids=lowerCamelCase__ ) __lowerCamelCase = model(lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only __lowerCamelCase = model(lowerCamelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, 
self.hidden_size) ) # image only __lowerCamelCase = model(pixel_values=lowerCamelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[int]: '''simple docstring''' __lowerCamelCase = self.num_labels __lowerCamelCase = LayoutLMvaForSequenceClassification(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() __lowerCamelCase = model( lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> str: '''simple docstring''' __lowerCamelCase = self.num_labels __lowerCamelCase = LayoutLMvaForTokenClassification(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() __lowerCamelCase = model( lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> str: '''simple docstring''' __lowerCamelCase = LayoutLMvaForQuestionAnswering(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() __lowerCamelCase = model( lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , 
token_type_ids=lowerCamelCase__ , start_positions=lowerCamelCase__ , end_positions=lowerCamelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase_ ( self ) -> Tuple: '''simple docstring''' __lowerCamelCase = self.prepare_config_and_inputs() ( ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ) = config_and_inputs __lowerCamelCase = { 'input_ids': input_ids, 'bbox': bbox, 'pixel_values': pixel_values, 'token_type_ids': token_type_ids, 'attention_mask': input_mask, } return config, inputs_dict @require_torch class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ): """simple docstring""" snake_case_ = False snake_case_ = False snake_case_ = False snake_case_ = ( ( LayoutLMvaModel, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaForQuestionAnswering, ) if is_torch_available() else () ) snake_case_ = ( {'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel} if is_torch_available() else {} ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Any: '''simple docstring''' # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has # the sequence dimension of the text embedding only. 
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`) return True def lowercase_ ( self ) -> str: '''simple docstring''' __lowerCamelCase = LayoutLMvaModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ) -> Tuple: '''simple docstring''' __lowerCamelCase = copy.deepcopy(lowerCamelCase__ ) if model_class in get_values(lowerCamelCase__ ): __lowerCamelCase = { k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous() if isinstance(lowerCamelCase__ , torch.Tensor ) and v.ndim > 1 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(lowerCamelCase__ ): __lowerCamelCase = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase__ ) elif model_class in get_values(lowerCamelCase__ ): __lowerCamelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase__ ) __lowerCamelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase__ ) elif model_class in [ *get_values(lowerCamelCase__ ), ]: __lowerCamelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase__ ) elif model_class in [ *get_values(lowerCamelCase__ ), ]: __lowerCamelCase = torch.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=lowerCamelCase__ , ) return inputs_dict def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' self.config_tester.run_common_tests() def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) def lowercase_ ( self ) -> List[Any]: '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", 
"relative_key", "relative_key_query"]: __lowerCamelCase = type self.model_tester.create_and_check_model(*lowerCamelCase__ ) def lowercase_ ( self ) -> List[str]: '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase__ ) def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowerCamelCase__ ) def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowerCamelCase__ ) @slow def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase = LayoutLMvaModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) def lowerCamelCase_ ( ) -> Union[str, Any]: """simple docstring""" __lowerCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @cached_property def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' return LayoutLMvaImageProcessor(apply_ocr=lowerCamelCase__ ) if is_vision_available() else None @slow def lowercase_ ( self ) -> str: '''simple docstring''' __lowerCamelCase = LayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' ).to(lowerCamelCase__ ) __lowerCamelCase = self.default_image_processor __lowerCamelCase = prepare_img() __lowerCamelCase = image_processor(images=lowerCamelCase__ , return_tensors='pt' ).pixel_values.to(lowerCamelCase__ ) __lowerCamelCase = torch.tensor([[1, 2]] ) __lowerCamelCase = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 ) # forward pass __lowerCamelCase = model( input_ids=input_ids.to(lowerCamelCase__ ) , 
bbox=bbox.to(lowerCamelCase__ ) , pixel_values=pixel_values.to(lowerCamelCase__ ) , ) # verify the logits __lowerCamelCase = torch.Size((1, 199, 768) ) self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase__ ) __lowerCamelCase = torch.tensor( [[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) )
469
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.activations import gelu_new, gelu_python, get_activation


@require_torch
class __lowerCAmelCase(unittest.TestCase):
    """Unit tests for the activation registry in ``transformers.activations``.

    NOTE(review): in the obfuscated original all four methods were named
    ``lowercase_``, so only the last one was ever bound (three tests silently
    never ran). Distinct ``test_*`` names are restored here so unittest
    discovers all of them.
    """

    def test_gelu_versions(self):
        """The pure-Python GELU matches torch's builtin, but differs from gelu_new."""
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation('gelu')
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        """gelu_10 equals plain GELU below the clip point and saturates at 10.0."""
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation('gelu')
        geluaa = get_activation('gelu_10')
        y_gelu = torch_builtin(x)
        y_gelu_aa = geluaa(x)
        # Mask out the clipped region; the two activations must agree elsewhere.
        clipped_mask = torch.where(y_gelu_aa < 10.0, 1, 0)
        self.assertTrue(torch.max(y_gelu_aa).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_aa * clipped_mask))

    def test_get_activation(self):
        """Every registered activation name resolves; unknown names raise."""
        get_activation('gelu')
        get_activation('gelu_10')
        get_activation('gelu_fast')
        get_activation('gelu_new')
        get_activation('gelu_python')
        get_activation('gelu_pytorch_tanh')
        get_activation('linear')
        get_activation('mish')
        get_activation('quick_gelu')
        get_activation('relu')
        get_activation('sigmoid')
        get_activation('silu')
        get_activation('swish')
        get_activation('tanh')
        # NOTE(review): the expected exception type was obfuscated away; upstream
        # `get_activation` raises KeyError for unknown / non-string keys — confirm.
        with self.assertRaises(KeyError):
            get_activation('bogus')
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        """Each get_activation() call returns a fresh object (no shared state)."""
        acta = get_activation('gelu')
        acta.a = 1
        actb = get_activation('gelu')
        # An attribute set on one instance must not leak onto another.
        self.assertEqual(acta.a, 1)
        with self.assertRaises(AttributeError):
            _ = actb.a
469
1
"""Tests for the seq2seq ROUGE helpers (`calculate_rouge` / `calculate_rouge_path`)."""
from collections import defaultdict
from pathlib import Path

import pandas as pd

from rouge_cli import calculate_rouge_path
from utils import calculate_rouge


# Hypotheses (model summaries). NOTE(review): in the obfuscated original both
# lists were bound to the same placeholder name, losing the first one; they are
# restored here as PRED / TAR, the names the tests below use.
PRED = [
    'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
    ' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
    ' depression" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
    'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
    ' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
    ' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
    ' body.',
    'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
    ' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
    ' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
    ' punishment.',
]

# Reference summaries, aligned with PRED.
TAR = [
    'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
    ' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
    ' had informed his Lufthansa training school of an episode of severe depression, airline says .',
    'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
    ' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
    ' Israelis .',
    'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
    ' death . Organization claims that governments around the world are using the threat of terrorism to advance'
    ' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
    ' sentences up by 28% .',
]


# NOTE(review): all six functions below were named ``A__`` in the obfuscated
# original, so pytest collected none of them and shadowing discarded five.
# Distinct ``test_*`` names and the boolean keyword values are restored from
# the semantics each assertion requires — confirm against upstream.

def test_disaggregated_scores_are_determinstic():
    """Per-example (non-aggregated) rouge2 scores must not depend on which other keys were requested."""
    no_aggregation = calculate_rouge(PRED, TAR, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_ra = calculate_rouge(PRED, TAR, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_ra["rouge2"]).fmeasure.mean()
    )


def test_newline_cnn_improvement():
    """Splitting sentences onto newlines should improve rougeLsum."""
    k = "rougeLsum"
    score = calculate_rouge(PRED, TAR, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TAR, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep


def test_newline_irrelevant_for_other_metrics():
    """rouge1/rouge2/rougeL are insensitive to the newline separator."""
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TAR, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TAR, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep


def test_single_sent_scores_dont_depend_on_newline_sep():
    """Single-sentence inputs give the same score with or without newline splitting."""
    hyp = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .",
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(hyp, tgt, newline_sep=True) == calculate_rouge(hyp, tgt, newline_sep=False)


def test_pegasus_newline():
    """Pegasus-style ``<n>`` separators should score better once newline handling is on."""
    pred = [
        """\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
    ]
    tgt = [
        """ Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score


def test_rouge_cli():
    """End-to-end check of the file-based CLI entry point."""
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
9
"""Check (and optionally regenerate) the auto-generated dummy-object files of diffusers.

All paths are set with the intent you should run this script from the root of
the repo with the command: python utils/check_dummies.py
"""
import argparse
import os
import re


# NOTE(review): the obfuscated original bound every constant below to the same
# placeholder name while the functions referenced the descriptive names,
# causing NameError; the names used by the call sites are restored here.
PATH_TO_DIFFUSERS = 'src/diffusers'

# Matches is_xxx_available()
_re_backend = re.compile(r'is\_([a-z_]*)_available\(\)')
# Matches from xxx import bla
_re_single_line_import = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')

# Templates for the generated dummy objects. NOTE(review): internal indentation
# of the class/function templates restored to 4 spaces (it was collapsed by the
# same mangling that broke the names) — confirm against a generated dummy file.
DUMMY_CONSTANT = '\n{0} = None\n'
DUMMY_CLASS = (
    '\nclass {0}(metaclass=DummyObject):\n'
    '    _backends = {1}\n\n'
    '    def __init__(self, *args, **kwargs):\n'
    '        requires_backends(self, {1})\n\n'
    '    @classmethod\n'
    '    def from_config(cls, *args, **kwargs):\n'
    '        requires_backends(cls, {1})\n\n'
    '    @classmethod\n'
    '    def from_pretrained(cls, *args, **kwargs):\n'
    '        requires_backends(cls, {1})\n'
)
DUMMY_FUNCTION = '\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n'


def find_backend(line):
    """Return the backend name(s) tested on `line`, joined by `_and_`, or None."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None
    return "_and_".join(backends)


def read_init():
    """Parse the main __init__ and map each backend to its guarded object names."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                # NOTE(review): the obfuscated original dropped the dict key here
                # (plain rebinding); the per-backend assignment is restored.
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects


def create_dummy_object(name, backend_name):
    """Render one dummy constant / function / class for `name` guarded by `backend_name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)


def create_dummy_files(backend_specific_objects=None):
    """Build the full text of every dummy_xxx_objects.py file, keyed by backend."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files


def check_dummies(overwrite=False):
    """Compare generated dummy files with those on disk; rewrite or raise on mismatch."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
    check_dummies(args.fix_and_overwrite)
9
1
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation A: List[Any] = logging.get_logger(__name__) A: Dict = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} A: Dict = { "tokenizer_file": { "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json", }, } A: List[Any] = { "gpt-neox-20b": 2_0_4_8, } class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ): __lowerCAmelCase : Dict = VOCAB_FILES_NAMES __lowerCAmelCase : Dict = PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCAmelCase : Optional[Any] = ['input_ids', 'attention_mask'] def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="<|endoftext|>" , _SCREAMING_SNAKE_CASE="<|endoftext|>" , _SCREAMING_SNAKE_CASE="<|endoftext|>" , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE , ) -> Any: '''simple docstring''' super().__init__( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) UpperCAmelCase : List[str] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" , _SCREAMING_SNAKE_CASE ) != add_prefix_space: UpperCAmelCase : Optional[int] = getattr(_SCREAMING_SNAKE_CASE , pre_tok_state.pop("""type""" ) ) UpperCAmelCase : Optional[Any] = add_prefix_space UpperCAmelCase : List[str] = pre_tok_class(**_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Dict = add_prefix_space def SCREAMING_SNAKE_CASE ( self , 
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Tuple[str]: '''simple docstring''' UpperCAmelCase : Tuple = self._tokenizer.model.save(_SCREAMING_SNAKE_CASE , name=_SCREAMING_SNAKE_CASE ) return tuple(_SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> List[int]: '''simple docstring''' UpperCAmelCase : Union[str, Any] = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) + [self.eos_token_id] ) if len(_SCREAMING_SNAKE_CASE ) > self.model_max_length: UpperCAmelCase : Optional[int] = input_ids[-self.model_max_length :] return input_ids
160
"""simple docstring""" import numpy as np from nltk.translate import meteor_score import datasets from datasets.config import importlib_metadata, version A: Union[str, Any] = version.parse(importlib_metadata.version("nltk")) if NLTK_VERSION >= version.Version("3.6.4"): from nltk import word_tokenize A: Tuple = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n" A: Tuple = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n" A: Any = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. 
Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE__ ( datasets.Metric ): def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Value("""string""" , id="""sequence""" ), } ) , codebase_urls=["""https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"""] , reference_urls=[ """https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score""", """https://en.wikipedia.org/wiki/METEOR""", ] , ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> List[str]: '''simple docstring''' import nltk nltk.download("""wordnet""" ) if NLTK_VERSION >= version.Version("""3.6.5""" ): nltk.download("""punkt""" ) if NLTK_VERSION >= version.Version("""3.6.6""" ): nltk.download("""omw-1.4""" ) def SCREAMING_SNAKE_CASE ( self , 
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0.9 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=0.5 ) -> Optional[Any]: '''simple docstring''' if NLTK_VERSION >= version.Version("""3.6.5""" ): UpperCAmelCase : int = [ meteor_score.single_meteor_score( word_tokenize(_SCREAMING_SNAKE_CASE ) , word_tokenize(_SCREAMING_SNAKE_CASE ) , alpha=_SCREAMING_SNAKE_CASE , beta=_SCREAMING_SNAKE_CASE , gamma=_SCREAMING_SNAKE_CASE ) for ref, pred in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ] else: UpperCAmelCase : List[str] = [ meteor_score.single_meteor_score(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , alpha=_SCREAMING_SNAKE_CASE , beta=_SCREAMING_SNAKE_CASE , gamma=_SCREAMING_SNAKE_CASE ) for ref, pred in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ] return {"meteor": np.mean(_SCREAMING_SNAKE_CASE )}
160
1
'''simple docstring''' def SCREAMING_SNAKE_CASE( UpperCamelCase ,UpperCamelCase ,UpperCamelCase=False ) -> Optional[Any]: if isinstance(UpperCamelCase ,UpperCamelCase ) and isinstance(UpperCamelCase ,UpperCamelCase ): UpperCAmelCase_ : Tuple = len(set_a.intersection(UpperCamelCase ) ) if alternative_union: UpperCAmelCase_ : Tuple = len(UpperCamelCase ) + len(UpperCamelCase ) else: UpperCAmelCase_ : str = len(set_a.union(UpperCamelCase ) ) return intersection / union if isinstance(UpperCamelCase ,(list, tuple) ) and isinstance(UpperCamelCase ,(list, tuple) ): UpperCAmelCase_ : Dict = [element for element in set_a if element in set_b] if alternative_union: UpperCAmelCase_ : Optional[Any] = len(UpperCamelCase ) + len(UpperCamelCase ) return len(UpperCamelCase ) / union else: UpperCAmelCase_ : Optional[int] = set_a + [element for element in set_b if element not in set_a] return len(UpperCamelCase ) / len(UpperCamelCase ) return len(UpperCamelCase ) / len(UpperCamelCase ) return None if __name__ == "__main__": lowerCAmelCase__ = {"a", "b", "c", "d", "e"} lowerCAmelCase__ = {"c", "d", "e", "f", "h", "i"} print(jaccard_similarity(set_a, set_b))
471
'''simple docstring''' from __future__ import annotations from typing import Any class lowercase ( a_ ): pass class lowercase : def __init__( self , _snake_case) -> None: UpperCAmelCase_ : Any = data UpperCAmelCase_ : Node | None = None def __iter__( self) -> Optional[int]: UpperCAmelCase_ : int = self UpperCAmelCase_ : List[str] = [] while node: if node in visited: raise ContainsLoopError visited.append(_snake_case) yield node.data UpperCAmelCase_ : Tuple = node.next_node @property def _snake_case ( self) -> bool: try: list(self) return False except ContainsLoopError: return True if __name__ == "__main__": lowerCAmelCase__ = Node(1) lowerCAmelCase__ = Node(2) lowerCAmelCase__ = Node(3) lowerCAmelCase__ = Node(4) print(root_node.has_loop) # False lowerCAmelCase__ = root_node.next_node print(root_node.has_loop) # True lowerCAmelCase__ = Node(5) lowerCAmelCase__ = Node(6) lowerCAmelCase__ = Node(5) lowerCAmelCase__ = Node(6) print(root_node.has_loop) # False lowerCAmelCase__ = Node(1) print(root_node.has_loop) # False
471
1
"""simple docstring""" import functools import logging import os import sys import threading from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Optional import huggingface_hub.utils as hf_hub_utils from tqdm import auto as tqdm_lib __magic_name__ = threading.Lock() __magic_name__ = None __magic_name__ = { '''debug''': logging.DEBUG, '''info''': logging.INFO, '''warning''': logging.WARNING, '''error''': logging.ERROR, '''critical''': logging.CRITICAL, } __magic_name__ = logging.WARNING __magic_name__ = True def _lowerCamelCase ( ) -> str: '''simple docstring''' a__ = os.getenv('TRANSFORMERS_VERBOSITY',__a ) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( f'''Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, ''' f'''has to be one of: { ", ".join(log_levels.keys() ) }''' ) return _default_log_level def _lowerCamelCase ( ) -> str: '''simple docstring''' return __name__.split('.' )[0] def _lowerCamelCase ( ) -> logging.Logger: '''simple docstring''' return logging.getLogger(_get_library_name() ) def _lowerCamelCase ( ) -> None: '''simple docstring''' global _default_handler with _lock: if _default_handler: # This library has already configured the library root logger. return a__ = logging.StreamHandler() # Set sys.stderr as stream. a__ = sys.stderr.flush # Apply our default configuration to the library root logger. 
a__ = _get_library_root_logger() library_root_logger.addHandler(_default_handler ) library_root_logger.setLevel(_get_default_logging_level() ) a__ = False def _lowerCamelCase ( ) -> None: '''simple docstring''' global _default_handler with _lock: if not _default_handler: return a__ = _get_library_root_logger() library_root_logger.removeHandler(_default_handler ) library_root_logger.setLevel(logging.NOTSET ) a__ = None def _lowerCamelCase ( ) -> Optional[Any]: '''simple docstring''' return log_levels def _lowerCamelCase ( UpperCAmelCase__ = None ) -> logging.Logger: '''simple docstring''' if name is None: a__ = _get_library_name() _configure_library_root_logger() return logging.getLogger(__a ) def _lowerCamelCase ( ) -> int: '''simple docstring''' _configure_library_root_logger() return _get_library_root_logger().getEffectiveLevel() def _lowerCamelCase ( UpperCAmelCase__ ) -> None: '''simple docstring''' _configure_library_root_logger() _get_library_root_logger().setLevel(__a ) def _lowerCamelCase ( ) -> str: '''simple docstring''' return set_verbosity(__a ) def _lowerCamelCase ( ) -> int: '''simple docstring''' return set_verbosity(__a ) def _lowerCamelCase ( ) -> Dict: '''simple docstring''' return set_verbosity(__a ) def _lowerCamelCase ( ) -> Union[str, Any]: '''simple docstring''' return set_verbosity(__a ) def _lowerCamelCase ( ) -> None: '''simple docstring''' _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().removeHandler(_default_handler ) def _lowerCamelCase ( ) -> None: '''simple docstring''' _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().addHandler(_default_handler ) def _lowerCamelCase ( UpperCAmelCase__ ) -> None: '''simple docstring''' _configure_library_root_logger() assert handler is not None _get_library_root_logger().addHandler(__a ) def _lowerCamelCase ( UpperCAmelCase__ ) -> None: '''simple docstring''' _configure_library_root_logger() assert handler 
is not None and handler not in _get_library_root_logger().handlers _get_library_root_logger().removeHandler(__a ) def _lowerCamelCase ( ) -> None: '''simple docstring''' _configure_library_root_logger() a__ = False def _lowerCamelCase ( ) -> None: '''simple docstring''' _configure_library_root_logger() a__ = True def _lowerCamelCase ( ) -> None: '''simple docstring''' a__ = _get_library_root_logger().handlers for handler in handlers: a__ = logging.Formatter('[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s' ) handler.setFormatter(__a ) def _lowerCamelCase ( ) -> None: '''simple docstring''' a__ = _get_library_root_logger().handlers for handler in handlers: handler.setFormatter(__a ) def _lowerCamelCase ( self,*UpperCAmelCase__,**UpperCAmelCase__ ) -> Union[str, Any]: '''simple docstring''' a__ = os.getenv('TRANSFORMERS_NO_ADVISORY_WARNINGS',__a ) if no_advisory_warnings: return self.warning(*__a,**__a ) __magic_name__ = warning_advice @functools.lru_cache(__a ) def _lowerCamelCase ( self,*UpperCAmelCase__,**UpperCAmelCase__ ) -> str: '''simple docstring''' self.warning(*__a,**__a ) __magic_name__ = warning_once class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self : List[str] , *_snake_case : Any , **_snake_case : int ) -> List[str]: # pylint: disable=unused-argument '''simple docstring''' a__ = args[0] if args else None def __iter__( self : Optional[Any] ) -> Any: '''simple docstring''' return iter(self._iterator ) def __getattr__( self : Union[str, Any] , _snake_case : Tuple ) -> int: '''simple docstring''' def empty_fn(*_snake_case : Union[str, Any] , **_snake_case : Tuple ): # pylint: disable=unused-argument return return empty_fn def __enter__( self : Optional[int] ) -> Optional[Any]: '''simple docstring''' return self def __exit__( self : List[Any] , _snake_case : int , _snake_case : List[Any] , _snake_case : Optional[int] ) -> Optional[Any]: '''simple docstring''' return class SCREAMING_SNAKE_CASE : """simple docstring""" 
def __call__( self : str , *_snake_case : Any , **_snake_case : Tuple ) -> Optional[Any]: '''simple docstring''' if _tqdm_active: return tqdm_lib.tqdm(*__lowerCAmelCase , **__lowerCAmelCase ) else: return EmptyTqdm(*__lowerCAmelCase , **__lowerCAmelCase ) def _lowerCAmelCase ( self : List[str] , *_snake_case : str , **_snake_case : Any ) -> int: '''simple docstring''' a__ = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*__lowerCAmelCase , **__lowerCAmelCase ) def _lowerCAmelCase ( self : str ) -> Dict: '''simple docstring''' if _tqdm_active: return tqdm_lib.tqdm.get_lock() __magic_name__ = _tqdm_cls() def _lowerCamelCase ( ) -> bool: '''simple docstring''' global _tqdm_active return bool(_tqdm_active ) def _lowerCamelCase ( ) -> Union[str, Any]: '''simple docstring''' global _tqdm_active a__ = True hf_hub_utils.enable_progress_bars() def _lowerCamelCase ( ) -> Any: '''simple docstring''' global _tqdm_active a__ = False hf_hub_utils.disable_progress_bars()
232
import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, ByTaTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): A : Optional[Any] = '''pt''' elif is_tf_available(): A : List[Any] = '''tf''' else: A : Union[str, Any] = '''jax''' class A (SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' __lowerCamelCase : str = ByTaTokenizer __lowerCamelCase : Tuple = False def a_ ( self : Dict ) -> Dict: """simple docstring""" super().setUp() A__ = ByTaTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def a_ ( self : Dict ) -> List[Any]: """simple docstring""" return ByTaTokenizer.from_pretrained("""google/byt5-small""" ) def a_ ( self : Union[str, Any] , **__lowerCAmelCase : int ) -> ByTaTokenizer: """simple docstring""" return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def a_ ( self : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Dict=20 , __lowerCAmelCase : List[str]=5 ) -> Tuple[str, list]: """simple docstring""" A__ = [] for i in range(len(__lowerCAmelCase ) ): try: A__ = tokenizer.decode([i] , clean_up_tokenization_spaces=__lowerCAmelCase ) except UnicodeDecodeError: pass toks.append((i, tok) ) A__ = list(filter(lambda __lowerCAmelCase : re.match(R"""^[ a-zA-Z]+$""" , t[1] ) , __lowerCAmelCase ) ) A__ = list(filter(lambda __lowerCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=__lowerCAmelCase ) , __lowerCAmelCase ) ) if max_length is not None and len(__lowerCAmelCase ) > max_length: A__ = toks[:max_length] if min_length is not None and len(__lowerCAmelCase ) < min_length and len(__lowerCAmelCase ) > 0: while len(__lowerCAmelCase ) < min_length: A__ = toks + toks # toks_str = [t[1] for t in toks] A__ = [t[0] 
for t in toks] # Ensure consistency A__ = tokenizer.decode(__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase ) if " " not in output_txt and len(__lowerCAmelCase ) > 1: A__ = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__lowerCAmelCase ) + """ """ + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__lowerCAmelCase ) ) if with_prefix_space: A__ = """ """ + output_txt A__ = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) return output_txt, output_ids def a_ ( self : List[str] ) -> int: """simple docstring""" A__ = self.ta_base_tokenizer A__ = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] ) A__ = tokenizer(["""hi""", """I went to the gym""", """"""] ) self.assertListEqual(batch_with_eos_added["""input_ids"""] , batch_without_eos_added["""input_ids"""] ) def a_ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" A__ = self.ta_base_tokenizer A__ = """Unicode €.""" A__ = tokenizer(__lowerCAmelCase ) A__ = [88, 1_13, 1_08, 1_02, 1_14, 1_03, 1_04, 35, 2_29, 1_33, 1_75, 49, 1] self.assertEqual(encoded["""input_ids"""] , __lowerCAmelCase ) # decoding A__ = tokenizer.decode(__lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , """Unicode €.</s>""" ) A__ = tokenizer("""e è é ê ë""" ) A__ = [1_04, 35, 1_98, 1_71, 35, 1_98, 1_72, 35, 1_98, 1_73, 35, 1_98, 1_74, 1] self.assertEqual(encoded["""input_ids"""] , __lowerCAmelCase ) # decoding A__ = tokenizer.decode(__lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , """e è é ê ë</s>""" ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ) , """e è é ê ë</s>""" ) def a_ ( self : Dict ) -> Optional[int]: """simple docstring""" A__ = self.ta_base_tokenizer A__ = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] # fmt: off A__ = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 
1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 1, 0] # fmt: on A__ = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , return_tensors=__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) if FRAMEWORK != "jax": A__ = list(batch.input_ids.numpy()[0] ) else: A__ = list(batch.input_ids.tolist()[0] ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) self.assertEqual((2, 37) , batch.input_ids.shape ) self.assertEqual((2, 37) , batch.attention_mask.shape ) def a_ ( self : Optional[Any] ) -> List[str]: """simple docstring""" A__ = self.ta_base_tokenizer A__ = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] A__ = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , return_tensors=__lowerCAmelCase ) # check if input_ids are returned and no decoder_input_ids self.assertIn("""input_ids""" , __lowerCAmelCase ) self.assertIn("""attention_mask""" , __lowerCAmelCase ) self.assertNotIn("""decoder_input_ids""" , __lowerCAmelCase ) self.assertNotIn("""decoder_attention_mask""" , __lowerCAmelCase ) def a_ ( self : int ) -> Any: """simple docstring""" A__ = self.ta_base_tokenizer A__ = [ """Summary of the text.""", """Another summary.""", ] A__ = tokenizer( text_target=__lowerCAmelCase , max_length=32 , padding="""max_length""" , truncation=__lowerCAmelCase , return_tensors=__lowerCAmelCase ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) def a_ ( self : List[str] ) -> Union[str, Any]: """simple docstring""" A__ = self.ta_base_tokenizer A__ = ["""A long paragraph for summarization. </s>"""] A__ = ["""Summary of the text. 
</s>"""] # fmt: off A__ = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 35, 1] A__ = [86, 1_20, 1_12, 1_12, 1_00, 1_17, 1_24, 35, 1_14, 1_05, 35, 1_19, 1_07, 1_04, 35, 1_19, 1_04, 1_23, 1_19, 49, 35, 1] # fmt: on A__ = tokenizer(__lowerCAmelCase , text_target=__lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , batch["""input_ids"""][0] ) self.assertEqual(__lowerCAmelCase , batch["""labels"""][0] ) def a_ ( self : str ) -> Dict: """simple docstring""" A__ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test A__ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}' ): # Isolate this from the other tests because we save additional tokens/etc A__ = tempfile.mkdtemp() A__ = """ He is very happy, UNwant\u00E9d,running""" A__ = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) tokenizer.save_pretrained(__lowerCAmelCase ) A__ = tokenizer.__class__.from_pretrained(__lowerCAmelCase ) A__ = after_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) shutil.rmtree(__lowerCAmelCase ) A__ = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}' ): # Isolate this from the other tests because we save additional tokens/etc A__ = tempfile.mkdtemp() A__ = """ He is very happy, UNwant\u00E9d,running""" tokenizer.add_tokens(["""bim""", """bambam"""] ) A__ = tokenizer.additional_special_tokens additional_special_tokens.append("""new_additional_special_token""" ) tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} ) A__ = 
tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) tokenizer.save_pretrained(__lowerCAmelCase ) A__ = tokenizer.__class__.from_pretrained(__lowerCAmelCase ) A__ = after_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) self.assertIn("""new_additional_special_token""" , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) A__ = tokenizer.__class__.from_pretrained(__lowerCAmelCase , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(__lowerCAmelCase ) def a_ ( self : Optional[int] ) -> str: """simple docstring""" A__ = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(__lowerCAmelCase ) with open(os.path.join(__lowerCAmelCase , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file: A__ = json.load(__lowerCAmelCase ) with open(os.path.join(__lowerCAmelCase , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file: A__ = json.load(__lowerCAmelCase ) A__ = [f'<extra_id_{i}>' for i in range(1_25 )] A__ = added_tokens_extra_ids + [ """an_additional_special_token""" ] A__ = added_tokens_extra_ids + [ """an_additional_special_token""" ] with open(os.path.join(__lowerCAmelCase , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(__lowerCAmelCase , __lowerCAmelCase ) with open(os.path.join(__lowerCAmelCase , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(__lowerCAmelCase , __lowerCAmelCase ) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files A__ = tokenizer_class.from_pretrained( __lowerCAmelCase , ) self.assertIn( """an_additional_special_token""" , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( ["""an_additional_special_token"""] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained A__ = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" , lstrip=__lowerCAmelCase )] A__ = tokenizer_class.from_pretrained( __lowerCAmelCase , additional_special_tokens=__lowerCAmelCase , ) self.assertIn("""a_new_additional_special_token""" , tokenizer.additional_special_tokens ) self.assertEqual( ["""a_new_additional_special_token"""] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) , ) def a_ ( self : Tuple ) -> Optional[Any]: """simple docstring""" A__ = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(__lowerCAmelCase ) A__ = tokenizer_class.from_pretrained(__lowerCAmelCase ) self.assertTrue(tokenizer.decode([2_55] ) == """""" ) def a_ ( self : List[Any] ) -> Tuple: """simple docstring""" pass def a_ ( self : Optional[int] ) -> Any: """simple docstring""" pass def a_ ( self : int ) -> Optional[Any]: """simple docstring""" pass def a_ ( self : str 
) -> Optional[int]: """simple docstring""" pass def a_ ( self : int ) -> Dict: """simple docstring""" A__ = self.get_tokenizers(fast=__lowerCAmelCase , do_lower_case=__lowerCAmelCase ) for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}' ): A__ = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""] A__ = tokenizer.convert_tokens_to_string(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) def a_ ( self : List[Any] ) -> int: """simple docstring""" A__ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}' ): A__ = [ """bos_token""", """eos_token""", """unk_token""", """sep_token""", """pad_token""", """cls_token""", """mask_token""", ] A__ = 0 A__ = tokenizer.convert_ids_to_tokens( __lowerCAmelCase , skip_special_tokens=__lowerCAmelCase ) for attr in attributes_list: setattr(__lowerCAmelCase , attr + """_id""" , __lowerCAmelCase ) self.assertEqual(getattr(__lowerCAmelCase , __lowerCAmelCase ) , __lowerCAmelCase ) self.assertEqual(getattr(__lowerCAmelCase , attr + """_id""" ) , __lowerCAmelCase ) setattr(__lowerCAmelCase , attr + """_id""" , __lowerCAmelCase ) self.assertEqual(getattr(__lowerCAmelCase , __lowerCAmelCase ) , __lowerCAmelCase ) self.assertEqual(getattr(__lowerCAmelCase , attr + """_id""" ) , __lowerCAmelCase ) setattr(__lowerCAmelCase , """additional_special_tokens_ids""" , [] ) self.assertListEqual(getattr(__lowerCAmelCase , """additional_special_tokens""" ) , [] ) self.assertListEqual(getattr(__lowerCAmelCase , """additional_special_tokens_ids""" ) , [] ) setattr(__lowerCAmelCase , """additional_special_tokens_ids""" , [token_id_to_test_setters] ) self.assertListEqual(getattr(__lowerCAmelCase , """additional_special_tokens""" ) , [token_to_test_setters] ) self.assertListEqual(getattr(__lowerCAmelCase , """additional_special_tokens_ids""" ) , 
[token_id_to_test_setters] )
176
0
import numpy as np
import torch
from torch.utils.data import Dataset

from utils import logger


class LmSeqsDataset(Dataset):
    """Dataset of tokenized language-modeling sequences for distillation.

    Holds token-id arrays and their lengths; on construction it validates the
    data, splits over-long sequences, and drops too-short / unknown-heavy ones.

    Args:
        params: namespace with ``max_model_input_size``, ``mlm``,
            ``special_tok_ids`` (dict) and ``is_master`` — assumed from usage,
            the object itself is defined by the caller.
        data: list of 1-D np.array of token ids, each starting with the
            cls/bos token and ending with the sep/eos token.
    """

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Sanity-check that stored lengths match the actual sequence lengths."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Split sequences longer than max_model_input_size into valid chunks.

        Each chunk is re-framed with the cls/bos and sep/eos tokens so every
        resulting sequence still looks like a complete model input.
        """
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            # Plain fixed-size chunking of a sequence.
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                # Chunk by max_len - 2 to leave room for the cls/sep tokens added below.
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Drop sequences of 11 tokens or fewer."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Drop sequences where at least half of the tokens are the unknown token."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Log corpus statistics; only on the master process."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')

    def batch_sequences(self, batch):
        """Collate a list of (token_ids, length) pairs into padded torch tensors.

        Padding uses pad_token for MLM and unk_token otherwise (original behavior).

        Returns:
            tk_t: LongTensor of shape (bs, max_seq_len_)
            lg_t: LongTensor of shape (bs,)
        """
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
69
import os
from collections import deque

import torch
from torch.utils.data import Dataset


class CNNDMDataset(Dataset):
    """Dataset over CNN/DailyMail-style story files (one file per document).

    Only file paths are kept in memory; stories are read lazily in __getitem__.
    """

    def __init__(self, path="", prefix="train"):
        """List all story files under ``path`` (skipping pre-computed summaries)."""
        assert os.path.isdir(path)

        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self):
        """Number of story documents."""
        return len(self.documents)

    def __getitem__(self, idx):
        """Read one story file and split it into (name, story_lines, summary_lines)."""
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
        story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines


def process_story(raw_story):
    """Split a raw story string into article lines and summary lines.

    Lines before the first '@highlight' marker are the article; the remaining
    non-marker lines are the summary. Missing end-of-sentence periods are added.

    Returns:
        (story_lines, summary_lines) — summary_lines is [] when no '@highlight'
        marker is present.
    """
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))

    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]

    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []

    # gather summary lines (everything after the first marker, minus further markers)
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))

    return story_lines, summary_lines


def _add_missing_period(line):
    """Append a period to a line that lacks terminal punctuation ('@highlight' markers excluded)."""
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u2019", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."


def fit_to_block_size(sequence, block_size, pad_token_id):
    """Truncate or right-pad ``sequence`` (a list) to exactly ``block_size`` tokens.

    NOTE: pads in place via list.extend, and returns the (possibly same) list —
    original behavior preserved.
    """
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence


def build_mask(sequence, pad_token_id):
    """Return an attention mask tensor: 1 for real tokens, 0 where ``sequence`` equals the pad id."""
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask


def encode_for_summarization(story_lines, summary_lines, tokenizer):
    """Encode story and summary lines with ``tokenizer`` and flatten each into one id list."""
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids


def compute_token_type_ids(batch, separator_token_id):
    """Build alternating 0/1 segment ids per sentence, switching at each separator token.

    The counter starts at -1 so tokens before the first separator get id 1
    (-1 % 2 == 1) — original behavior preserved.
    """
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
69
1
'''simple docstring''' from math import ceil def _SCREAMING_SNAKE_CASE (A = 1_001 ) -> int: """simple docstring""" lowercase__ = 1 for i in range(1 , int(ceil(n / 2.0 ) ) ): lowercase__ = 2 * i + 1 lowercase__ = 2 * i lowercase__ = total + 4 * odd**2 - 6 * even return total if __name__ == "__main__": import sys if len(sys.argv) == 1: print(solution()) else: try: lowerCamelCase : int = int(sys.argv[1]) print(solution(n)) except ValueError: print('Invalid entry - please enter a number')
460
from __future__ import annotations import unittest from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel @require_tf class UpperCamelCase : __UpperCamelCase =BlenderbotSmallConfig __UpperCamelCase ={} __UpperCamelCase ="gelu" def __init__( self : Optional[int] , snake_case__ : Dict , snake_case__ : Optional[int]=1_3 , snake_case__ : Optional[int]=7 , snake_case__ : Optional[Any]=True , snake_case__ : List[str]=False , snake_case__ : int=9_9 , snake_case__ : Any=3_2 , snake_case__ : Tuple=2 , snake_case__ : Optional[int]=4 , snake_case__ : int=3_7 , snake_case__ : Optional[Any]=0.1 , snake_case__ : int=0.1 , snake_case__ : Any=2_0 , snake_case__ : List[Any]=2 , snake_case__ : int=1 , snake_case__ : int=0 , ): """simple docstring""" SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = seq_length SCREAMING_SNAKE_CASE = is_training SCREAMING_SNAKE_CASE = use_labels SCREAMING_SNAKE_CASE = vocab_size SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = hidden_dropout_prob SCREAMING_SNAKE_CASE = attention_probs_dropout_prob SCREAMING_SNAKE_CASE = max_position_embeddings SCREAMING_SNAKE_CASE = eos_token_id SCREAMING_SNAKE_CASE = pad_token_id SCREAMING_SNAKE_CASE = bos_token_id def UpperCamelCase ( self : str ): """simple docstring""" SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length - 1] , 
self.vocab_size ) SCREAMING_SNAKE_CASE = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) SCREAMING_SNAKE_CASE = tf.concat([input_ids, eos_tensor] , axis=1 ) SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) SCREAMING_SNAKE_CASE = prepare_blenderbot_small_inputs_dict(snake_case__ , snake_case__ , snake_case__ ) return config, inputs_dict def UpperCamelCase ( self : Dict , snake_case__ : Tuple , snake_case__ : Any ): """simple docstring""" SCREAMING_SNAKE_CASE = TFBlenderbotSmallModel(config=snake_case__ ).get_decoder() SCREAMING_SNAKE_CASE = inputs_dict['input_ids'] SCREAMING_SNAKE_CASE = input_ids[:1, :] SCREAMING_SNAKE_CASE = inputs_dict['attention_mask'][:1, :] SCREAMING_SNAKE_CASE = inputs_dict['head_mask'] SCREAMING_SNAKE_CASE = 1 # first forward pass SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , head_mask=snake_case__ , use_cache=snake_case__ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size ) SCREAMING_SNAKE_CASE = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and SCREAMING_SNAKE_CASE = tf.concat([input_ids, next_tokens] , axis=-1 ) 
SCREAMING_SNAKE_CASE = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ )[0] SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , past_key_values=snake_case__ )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice SCREAMING_SNAKE_CASE = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx] SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(snake_case__ , snake_case__ , rtol=1E-3 ) def __lowerCAmelCase ( _UpperCamelCase : Dict , _UpperCamelCase : Optional[int] , _UpperCamelCase : str , _UpperCamelCase : Dict=None , _UpperCamelCase : Optional[int]=None , _UpperCamelCase : Any=None , _UpperCamelCase : List[str]=None , _UpperCamelCase : Optional[Any]=None , ) -> str: '''simple docstring''' if attention_mask is None: SCREAMING_SNAKE_CASE = tf.cast(tf.math.not_equal(_UpperCamelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: SCREAMING_SNAKE_CASE = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: SCREAMING_SNAKE_CASE = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: SCREAMING_SNAKE_CASE = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: SCREAMING_SNAKE_CASE = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class 
UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): __UpperCamelCase =( (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else () ) __UpperCamelCase =(TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else () __UpperCamelCase =( { "conversational": TFBlenderbotSmallForConditionalGeneration, "feature-extraction": TFBlenderbotSmallModel, "summarization": TFBlenderbotSmallForConditionalGeneration, "text2text-generation": TFBlenderbotSmallForConditionalGeneration, "translation": TFBlenderbotSmallForConditionalGeneration, } if is_tf_available() else {} ) __UpperCamelCase =True __UpperCamelCase =False __UpperCamelCase =False def UpperCamelCase ( self : Union[str, Any] ): """simple docstring""" SCREAMING_SNAKE_CASE = TFBlenderbotSmallModelTester(self ) SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=snake_case__ ) def UpperCamelCase ( self : Any ): """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase ( self : List[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*snake_case__ ) @require_tokenizers @require_tf class UpperCamelCase ( unittest.TestCase ): __UpperCamelCase =[ "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like " " i'm going to throw up.\nand why is that?" 
] __UpperCamelCase ="facebook/blenderbot_small-90M" @cached_property def UpperCamelCase ( self : Union[str, Any] ): """simple docstring""" return BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' ) @cached_property def UpperCamelCase ( self : Optional[int] ): """simple docstring""" SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def UpperCamelCase ( self : Tuple ): """simple docstring""" SCREAMING_SNAKE_CASE = self.tokenizer(self.src_text , return_tensors='tf' ) SCREAMING_SNAKE_CASE = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=snake_case__ , ) SCREAMING_SNAKE_CASE = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=snake_case__ )[0] assert generated_words in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", "i'm not sure. i just feel like i've been in a bad situation.", )
439
0
import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from datasets.arrow_dataset import Dataset from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex from .utils import require_elasticsearch, require_faiss A__ : Union[str, Any] = pytest.mark.integration @require_faiss class _UpperCAmelCase ( A__ ): """simple docstring""" def lowercase__ ( self : List[Any] ): '''simple docstring''' lowercase__ = Dataset.from_dict({'''filename''': ['''my_name-train''' + '''_''' + str(lowerCamelCase ) for x in np.arange(30 ).tolist()]} ) return dset def lowercase__ ( self : str ): '''simple docstring''' import faiss lowercase__ = self._create_dummy_dataset() lowercase__ = dset.map( lambda lowerCamelCase, lowerCamelCase : {"vecs": i * np.ones(5, dtype=np.floataa )}, with_indices=lowerCamelCase, keep_in_memory=lowerCamelCase ) lowercase__ = dset.add_faiss_index('''vecs''', batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT ) lowercase__ , lowercase__ = dset.get_nearest_examples('''vecs''', np.ones(5, dtype=np.floataa ) ) self.assertEqual(examples['''filename'''][0], '''my_name-train_29''' ) dset.drop_index('''vecs''' ) def lowercase__ ( self : str ): '''simple docstring''' import faiss lowercase__ = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1, 1 ), index_name='''vecs''', batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT, ) lowercase__ , lowercase__ = dset.get_nearest_examples('''vecs''', np.ones(5, dtype=np.floataa ) ) self.assertEqual(examples['''filename'''][0], '''my_name-train_29''' ) def lowercase__ ( self : List[str] ): '''simple docstring''' import faiss lowercase__ = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1, 1 ), index_name='''vecs''', metric_type=faiss.METRIC_INNER_PRODUCT, 
) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=lowerCamelCase ) as tmp_file: dset.save_faiss_index('''vecs''', tmp_file.name ) dset.load_faiss_index('''vecs2''', tmp_file.name ) os.unlink(tmp_file.name ) lowercase__ , lowercase__ = dset.get_nearest_examples('''vecs2''', np.ones(5, dtype=np.floataa ) ) self.assertEqual(examples['''filename'''][0], '''my_name-train_29''' ) def lowercase__ ( self : List[Any] ): '''simple docstring''' lowercase__ = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1, 1 ), index_name='''vecs''' ) dset.drop_index('''vecs''' ) self.assertRaises(lowerCamelCase, partial(dset.get_nearest_examples, '''vecs2''', np.ones(5, dtype=np.floataa ) ) ) def lowercase__ ( self : Union[str, Any] ): '''simple docstring''' from elasticsearch import Elasticsearch lowercase__ = self._create_dummy_dataset() with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch( '''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk: lowercase__ = {'''acknowledged''': True} mocked_bulk.return_value([(True, None)] * 30 ) lowercase__ = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 29}]}} lowercase__ = Elasticsearch() dset.add_elasticsearch_index('''filename''', es_client=lowerCamelCase ) lowercase__ , lowercase__ = dset.get_nearest_examples('''filename''', '''my_name-train_29''' ) self.assertEqual(examples['''filename'''][0], '''my_name-train_29''' ) @require_faiss class _UpperCAmelCase ( A__ ): """simple docstring""" def lowercase__ ( self : 
List[Any] ): '''simple docstring''' import faiss lowercase__ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) # add vectors index.add_vectors(np.eye(5, dtype=np.floataa ) ) self.assertIsNotNone(index.faiss_index ) self.assertEqual(index.faiss_index.ntotal, 5 ) index.add_vectors(np.zeros((5, 5), dtype=np.floataa ) ) self.assertEqual(index.faiss_index.ntotal, 10 ) # single query lowercase__ = np.zeros(5, dtype=np.floataa ) lowercase__ = 1 lowercase__ , lowercase__ = index.search(lowerCamelCase ) self.assertRaises(lowerCamelCase, index.search, query.reshape(-1, 1 ) ) self.assertGreater(scores[0], 0 ) self.assertEqual(indices[0], 1 ) # batched queries lowercase__ = np.eye(5, dtype=np.floataa )[::-1] lowercase__ , lowercase__ = index.search_batch(lowerCamelCase ) self.assertRaises(lowerCamelCase, index.search_batch, queries[0] ) lowercase__ = [scores[0] for scores in total_scores] lowercase__ = [indices[0] for indices in total_indices] self.assertGreater(np.min(lowerCamelCase ), 0 ) self.assertListEqual([4, 3, 2, 1, 0], lowerCamelCase ) def lowercase__ ( self : Optional[int] ): '''simple docstring''' import faiss lowercase__ = FaissIndex(string_factory='''Flat''' ) index.add_vectors(np.eye(5, dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index, faiss.IndexFlat ) lowercase__ = FaissIndex(string_factory='''LSH''' ) index.add_vectors(np.eye(5, dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index, faiss.IndexLSH ) with self.assertRaises(lowerCamelCase ): lowercase__ = FaissIndex(string_factory='''Flat''', custom_index=faiss.IndexFlat(5 ) ) def lowercase__ ( self : Tuple ): '''simple docstring''' import faiss lowercase__ = faiss.IndexFlat(5 ) lowercase__ = FaissIndex(custom_index=lowerCamelCase ) index.add_vectors(np.eye(5, dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index, faiss.IndexFlat ) def lowercase__ ( self : List[str] ): '''simple docstring''' import faiss lowercase__ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) 
index.add_vectors(np.eye(5, dtype=np.floataa ) ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=lowerCamelCase ) as tmp_file: index.save(tmp_file.name ) lowercase__ = FaissIndex.load(tmp_file.name ) os.unlink(tmp_file.name ) lowercase__ = np.zeros(5, dtype=np.floataa ) lowercase__ = 1 lowercase__ , lowercase__ = index.search(lowerCamelCase ) self.assertGreater(scores[0], 0 ) self.assertEqual(indices[0], 1 ) @require_faiss def a ( lowerCamelCase_ ): '''simple docstring''' import faiss lowercase__ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) lowercase__ = '''index.faiss''' lowercase__ = F"""mock://{index_name}""" index.save(lowerCamelCase_ , storage_options=mockfs.storage_options ) lowercase__ = FaissIndex.load(lowerCamelCase_ , storage_options=mockfs.storage_options ) lowercase__ = np.zeros(5 , dtype=np.floataa ) lowercase__ = 1 lowercase__ , lowercase__ = index.search(lowerCamelCase_ ) assert scores[0] > 0 assert indices[0] == 1 @require_elasticsearch class _UpperCAmelCase ( A__ ): """simple docstring""" def lowercase__ ( self : Dict ): '''simple docstring''' from elasticsearch import Elasticsearch with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch( '''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk: lowercase__ = Elasticsearch() lowercase__ = {'''acknowledged''': True} lowercase__ = ElasticSearchIndex(es_client=lowerCamelCase ) mocked_bulk.return_value([(True, None)] * 3 ) index.add_documents(['''foo''', '''bar''', '''foobar'''] ) # single query lowercase__ = 
'''foo''' lowercase__ = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}} lowercase__ , lowercase__ = index.search(lowerCamelCase ) self.assertEqual(scores[0], 1 ) self.assertEqual(indices[0], 0 ) # single query with timeout lowercase__ = '''foo''' lowercase__ = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}} lowercase__ , lowercase__ = index.search(lowerCamelCase, request_timeout=30 ) self.assertEqual(scores[0], 1 ) self.assertEqual(indices[0], 0 ) # batched queries lowercase__ = ['''foo''', '''bar''', '''foobar'''] lowercase__ = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}} lowercase__ , lowercase__ = index.search_batch(lowerCamelCase ) lowercase__ = [scores[0] for scores in total_scores] lowercase__ = [indices[0] for indices in total_indices] self.assertGreater(np.min(lowerCamelCase ), 0 ) self.assertListEqual([1, 1, 1], lowerCamelCase ) # batched queries with timeout lowercase__ = ['''foo''', '''bar''', '''foobar'''] lowercase__ = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}} lowercase__ , lowercase__ = index.search_batch(lowerCamelCase, request_timeout=30 ) lowercase__ = [scores[0] for scores in total_scores] lowercase__ = [indices[0] for indices in total_indices] self.assertGreater(np.min(lowerCamelCase ), 0 ) self.assertListEqual([1, 1, 1], lowerCamelCase )
708
class _UpperCAmelCase : """simple docstring""" def __init__( self : Optional[int], lowerCamelCase : Union[str, Any] ): '''simple docstring''' # we need a list not a string, so do something to change the type lowercase__ = arr.split(''',''' ) def lowercase__ ( self : Optional[int] ): '''simple docstring''' lowercase__ = [int(self.array[0] )] * len(self.array ) lowercase__ = [int(self.array[0] )] * len(self.array ) for i in range(1, len(self.array ) ): lowercase__ = max( int(self.array[i] ) + sum_value[i - 1], int(self.array[i] ) ) lowercase__ = max(sum_value[i], rear[i - 1] ) return rear[len(self.array ) - 1] if __name__ == "__main__": A__ : Dict = input('please input some numbers:') A__ : Union[str, Any] = SubArray(whole_array) A__ : int = array.solve_sub_array() print(('the results is:', re))
671
0
"""simple docstring""" import math class __lowercase : def __lowercase ( self : Any ,A : list[list[float]] ,A : list[int] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = 0.0 UpperCAmelCase__ : Any = 0.0 for i in range(len(A ) ): da += math.pow((sample[i] - weights[0][i]) ,2 ) da += math.pow((sample[i] - weights[1][i]) ,2 ) return 0 if da > da else 1 return 0 def __lowercase ( self : Union[str, Any] ,A : list[list[int | float]] ,A : list[int] ,A : int ,A : float ): '''simple docstring''' for i in range(len(A ) ): weights[j][i] += alpha * (sample[i] - weights[j][i]) return weights def lowerCAmelCase ( ): '''simple docstring''' UpperCAmelCase__ : int = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]] # weight initialization ( n, C ) UpperCAmelCase__ : Union[str, Any] = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]] # training UpperCAmelCase__ : int = SelfOrganizingMap() UpperCAmelCase__ : Any = 3 UpperCAmelCase__ : Optional[int] = 0.5 for _ in range(__UpperCamelCase ): for j in range(len(__UpperCamelCase ) ): # training sample UpperCAmelCase__ : int = training_samples[j] # Compute the winning vector UpperCAmelCase__ : List[str] = self_organizing_map.get_winner(__UpperCamelCase , __UpperCamelCase ) # Update the winning vector UpperCAmelCase__ : Optional[Any] = self_organizing_map.update(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # classify test sample UpperCAmelCase__ : List[str] = [0, 0, 0, 1] UpperCAmelCase__ : Optional[int] = self_organizing_map.get_winner(__UpperCamelCase , __UpperCamelCase ) # results print(F"Clusters that the test sample belongs to : {winner}" ) print(F"Weights that have been trained : {weights}" ) # running the main() function if __name__ == "__main__": main()
65
from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''huggingface/informer-tourism-monthly''': ( '''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json''' ), # See all Informer models at https://huggingface.co/models?filter=informer } class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = "informer" SCREAMING_SNAKE_CASE : int = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", "num_hidden_layers": "encoder_layers", } def __init__( self : Dict , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : str = "student_t" , _UpperCamelCase : str = "nll" , _UpperCamelCase : int = 1 , _UpperCamelCase : List[int] = None , _UpperCamelCase : Optional[Union[str, bool]] = "mean" , _UpperCamelCase : int = 0 , _UpperCamelCase : int = 0 , _UpperCamelCase : int = 0 , _UpperCamelCase : int = 0 , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : int = 6_4 , _UpperCamelCase : int = 3_2 , _UpperCamelCase : int = 3_2 , _UpperCamelCase : int = 2 , _UpperCamelCase : int = 2 , _UpperCamelCase : int = 2 , _UpperCamelCase : int = 2 , _UpperCamelCase : bool = True , _UpperCamelCase : str = "gelu" , _UpperCamelCase : float = 0.05 , _UpperCamelCase : float = 0.1 , _UpperCamelCase : float = 0.1 , _UpperCamelCase : float = 0.1 , _UpperCamelCase : float = 0.1 , _UpperCamelCase : int = 1_0_0 , _UpperCamelCase : float = 0.02 , _UpperCamelCase : Dict=True , _UpperCamelCase : str = "prob" , _UpperCamelCase : int = 5 , _UpperCamelCase : bool = True , **_UpperCamelCase : Optional[Any] , ) ->Optional[int]: # time series specific configuration snake_case_ = prediction_length snake_case_ = context_length or prediction_length snake_case_ = distribution_output snake_case_ = loss 
snake_case_ = input_size snake_case_ = num_time_features snake_case_ = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7] snake_case_ = scaling snake_case_ = num_dynamic_real_features snake_case_ = num_static_real_features snake_case_ = num_static_categorical_features # set cardinality if cardinality and num_static_categorical_features > 0: if len(_UpperCamelCase ) != num_static_categorical_features: raise ValueError( '''The cardinality should be a list of the same length as `num_static_categorical_features`''' ) snake_case_ = cardinality else: snake_case_ = [0] # set embedding_dimension if embedding_dimension and num_static_categorical_features > 0: if len(_UpperCamelCase ) != num_static_categorical_features: raise ValueError( '''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' ) snake_case_ = embedding_dimension else: snake_case_ = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality] snake_case_ = num_parallel_samples # Transformer architecture configuration snake_case_ = input_size * len(self.lags_sequence ) + self._number_of_features snake_case_ = d_model snake_case_ = encoder_attention_heads snake_case_ = decoder_attention_heads snake_case_ = encoder_ffn_dim snake_case_ = decoder_ffn_dim snake_case_ = encoder_layers snake_case_ = decoder_layers snake_case_ = dropout snake_case_ = attention_dropout snake_case_ = activation_dropout snake_case_ = encoder_layerdrop snake_case_ = decoder_layerdrop snake_case_ = activation_function snake_case_ = init_std snake_case_ = use_cache # Informer snake_case_ = attention_type snake_case_ = sampling_factor snake_case_ = distil super().__init__(is_encoder_decoder=_UpperCamelCase , **_UpperCamelCase ) @property def snake_case__( self : Optional[Any] ) ->int: return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
39
0
'''simple docstring''' _SCREAMING_SNAKE_CASE = "Tobias Carryer" from time import time class _lowerCAmelCase : """simple docstring""" def __init__( self : Tuple , __snake_case : List[Any] , __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : List[Any]=int(time() ) )-> Tuple: # noqa: B008 snake_case = multiplier snake_case = increment snake_case = modulo snake_case = seed def lowerCAmelCase ( self : Tuple )-> List[str]: snake_case = (self.multiplier * self.seed + self.increment) % self.modulo return self.seed if __name__ == "__main__": # Show the LCG in action. _SCREAMING_SNAKE_CASE = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31) while True: print(lcg.next_number())
703
'''simple docstring''' from __future__ import annotations from math import pi # Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of # Pi and the function _SCREAMING_SNAKE_CASE = 1.0_5457_1817E-34 # unit of ℏ : J * s _SCREAMING_SNAKE_CASE = 3E8 # unit of c : m * s^-1 def __lowerCamelCase ( __lowerCAmelCase : float , __lowerCAmelCase : float , __lowerCAmelCase : float ) -> dict[str, float]: if (force, area, distance).count(0 ) != 1: raise ValueError("""One and only one argument must be 0""" ) if force < 0: raise ValueError("""Magnitude of force can not be negative""" ) if distance < 0: raise ValueError("""Distance can not be negative""" ) if area < 0: raise ValueError("""Area can not be negative""" ) if force == 0: snake_case = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / ( 2_40 * (distance) ** 4 ) return {"force": force} elif area == 0: snake_case = (2_40 * force * (distance) ** 4) / ( REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 ) return {"area": area} elif distance == 0: snake_case = ( (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (2_40 * force) ) ** (1 / 4) return {"distance": distance} raise ValueError("""One and only one argument must be 0""" ) # Run doctest if __name__ == "__main__": import doctest doctest.testmod()
517
0
"""simple docstring""" def snake_case ( A__ ,A__ ): if not isinstance(A__ ,A__ ): raise ValueError("iterations must be defined as integers" ) if not isinstance(A__ ,A__ ) or not number >= 1: raise ValueError( "starting number must be\n and integer and be more than 0" ) if not iterations >= 1: raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" ) UpperCAmelCase_ : Tuple = "" while number <= iterations: if number % 3 == 0: out += "Fizz" if number % 5 == 0: out += "Buzz" if 0 not in (number % 3, number % 5): out += str(A__ ) # print(out) number += 1 out += " " return out if __name__ == "__main__": import doctest doctest.testmod()
95
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase : Union[str, Any] = { 'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'], 'processing_git': ['GitProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : List[Any] = [ 'GIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'GitForCausalLM', 'GitModel', 'GitPreTrainedModel', 'GitVisionModel', ] if TYPE_CHECKING: from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig from .processing_git import GitProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_git import ( GIT_PRETRAINED_MODEL_ARCHIVE_LIST, GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel, ) else: import sys lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
3
0
import builtins import sys from ...utils.imports import _is_package_available from . import cursor, input from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor from .keymap import KEYMAP UpperCamelCase = False try: UpperCamelCase = _is_package_available('''google.colab''') except ModuleNotFoundError: pass @input.register class __UpperCAmelCase : def __init__( self: List[str] , UpperCAmelCase_: str = None , UpperCAmelCase_: list = [] ): '''simple docstring''' _SCREAMING_SNAKE_CASE = 0 _SCREAMING_SNAKE_CASE = choices _SCREAMING_SNAKE_CASE = prompt if sys.platform == "win32": _SCREAMING_SNAKE_CASE = """*""" else: _SCREAMING_SNAKE_CASE = """➔ """ def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: str , UpperCAmelCase_: str = "" ): '''simple docstring''' if sys.platform != "win32": writeColor(self.choices[index] , 32 , UpperCAmelCase_ ) else: forceWrite(self.choices[index] , UpperCAmelCase_ ) def UpperCamelCase ( self: List[str] , UpperCAmelCase_: int ): '''simple docstring''' if index == self.position: forceWrite(F' {self.arrow_char} ' ) self.write_choice(UpperCAmelCase_ ) else: forceWrite(F' {self.choices[index]}' ) reset_cursor() def UpperCamelCase ( self: Tuple , UpperCAmelCase_: Direction , UpperCAmelCase_: int = 1 ): '''simple docstring''' _SCREAMING_SNAKE_CASE = self.position if direction == Direction.DOWN: if self.position + 1 >= len(self.choices ): return self.position += num_spaces else: if self.position - 1 < 0: return self.position -= num_spaces clear_line() self.print_choice(UpperCAmelCase_ ) move_cursor(UpperCAmelCase_ , direction.name ) self.print_choice(self.position ) @input.mark(KEYMAP["""up"""] ) def UpperCamelCase ( self: Any ): '''simple docstring''' self.move_direction(Direction.UP ) @input.mark(KEYMAP["""down"""] ) def UpperCamelCase ( self: Dict ): '''simple docstring''' self.move_direction(Direction.DOWN ) @input.mark(KEYMAP["""newline"""] ) def UpperCamelCase ( self: int ): '''simple 
docstring''' move_cursor(len(self.choices ) - self.position , """DOWN""" ) return self.position @input.mark(KEYMAP["""interrupt"""] ) def UpperCamelCase ( self: List[str] ): '''simple docstring''' move_cursor(len(self.choices ) - self.position , """DOWN""" ) raise KeyboardInterrupt @input.mark_multiple(*[KEYMAP[str(UpperCAmelCase_ )] for number in range(10 )] ) def UpperCamelCase ( self: List[Any] ): '''simple docstring''' _SCREAMING_SNAKE_CASE = int(chr(self.current_selection ) ) _SCREAMING_SNAKE_CASE = index - self.position if index == self.position: return if index < len(self.choices ): if self.position > index: self.move_direction(Direction.UP , -movement ) elif self.position < index: self.move_direction(Direction.DOWN , UpperCAmelCase_ ) else: return else: return def UpperCamelCase ( self: int , UpperCAmelCase_: int = 0 ): '''simple docstring''' if self.prompt: linebreak() forceWrite(self.prompt , """\n""" ) if in_colab: forceWrite("""Please input a choice index (starting from 0), and press enter""" , """\n""" ) else: forceWrite("""Please select a choice using the arrow or number keys, and selecting with enter""" , """\n""" ) _SCREAMING_SNAKE_CASE = default_choice for i in range(len(self.choices ) ): self.print_choice(UpperCAmelCase_ ) forceWrite("""\n""" ) move_cursor(len(self.choices ) - self.position , """UP""" ) with cursor.hide(): while True: if in_colab: try: _SCREAMING_SNAKE_CASE = int(builtins.input() ) except ValueError: _SCREAMING_SNAKE_CASE = default_choice else: _SCREAMING_SNAKE_CASE = self.handle_input() if choice is not None: reset_cursor() for _ in range(len(self.choices ) + 1 ): move_cursor(1 , """UP""" ) clear_line() self.write_choice(UpperCAmelCase_ , """\n""" ) return choice
718
import os
from typing import Optional

import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE


class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """
    Read-only filesystem view of a single compressed file.

    The "archive" contains exactly one member: the file obtained by stripping
    the compression extension from the source file name.

    BUG FIX vs. the previous revision: ``dir_cache``, ``compressed_name`` and
    ``uncompressed_name`` were assigned to throwaway locals instead of the
    instance, so ``_get_dirs``/``cat`` could never work, and every subclass
    shared the same (undefined) class name.
    """

    root_marker = ""
    # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    protocol: str = None
    # compression type in fsspec. ex: "gzip"
    compression: str = None
    # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
    extension: str = None

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        """
        Args:
            fo: Path/URL of the compressed file.
            target_protocol: Protocol of the filesystem that actually holds ``fo``.
            target_options: Extra options forwarded to that filesystem.
        """
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # Compressed-file paths are always relative to the archive root.
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        """Populate the single-entry directory listing on first use."""
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        """Return the full decompressed content (the archive has one member)."""
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        # out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            """Delegating wrapper whose ``close`` attribute is writable."""

            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        # BUG FIX: the patched __enter__ must actually be installed on the file.
        self.file.__enter__ = fixed_enter
569
0
"""Tokenization tests for the XGLM slow and fast tokenizers."""
import pickle
import shutil
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    # BUG FIX vs. the previous revision: every method shared the name
    # `_lowercase`, so they shadowed each other and unittest discovered no
    # tests; several locals (`tokenizer`, `vocab_keys`, ...) were undefined.

    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Token <-> id round trip for a special token."""
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1008)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1008)

    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        # Unknown pieces ("9" and "é" are not in the fixture vocab) map to <unk>.
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31227, 4447, 35]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
            'input_ids': [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
            'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
        }  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/xglm-564M",
            padding=False,
        )
683
'''simple docstring''' _UpperCAmelCase : Any = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)] def snake_case__ ( UpperCamelCase ) -> int: _UpperCamelCase : Any = 0 while number: # Increased Speed Slightly by checking every 5 digits together. sum_of_digits_squared += DIGITS_SQUARED[number % 10_00_00] number //= 10_00_00 return sum_of_digits_squared # There are 2 Chains made, # One ends with 89 with the chain member 58 being the one which when declared first, # there will be the least number of iterations for all the members to be checked. # The other one ends with 1 and has only one element 1. # So 58 and 1 are chosen to be declared at the starting. # Changed dictionary to an array to quicken the solution _UpperCAmelCase : list[bool | None] = [None] * 10000000 _UpperCAmelCase : str = True _UpperCAmelCase : Tuple = False def snake_case__ ( UpperCamelCase ) -> bool: if CHAINS[number - 1] is not None: return CHAINS[number - 1] # type: ignore _UpperCamelCase : List[str] = chain(next_number(UpperCamelCase ) ) _UpperCamelCase : Tuple = number_chain while number < 10_00_00_00: _UpperCamelCase : int = number_chain number *= 10 return number_chain def snake_case__ ( UpperCamelCase = 10_00_00_00 ) -> int: for i in range(1 ,UpperCamelCase ): if CHAINS[i] is None: chain(i + 1 ) return CHAINS[:number].count(UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod() print(f"""{solution() = }""")
683
1
"""simple docstring""" import argparse import os import torch from transformers import FlavaImageCodebook, FlavaImageCodebookConfig def lowercase (_snake_case ,_snake_case ,_snake_case ,_snake_case ) -> List[Any]: '''simple docstring''' __UpperCamelCase = s.rsplit(_snake_case ,_snake_case ) return new.join(_snake_case ) def lowercase (_snake_case ) -> List[Any]: '''simple docstring''' return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items() ) def lowercase (_snake_case ) -> int: '''simple docstring''' __UpperCamelCase = {} __UpperCamelCase = ["group_1", "group_2", "group_3", "group_4"] for key, value in state_dict.items(): for group_key in group_keys: if group_key in key: __UpperCamelCase = key.replace(f"""{group_key}.""" ,f"""{group_key}.group.""" ) if "res_path" in key: __UpperCamelCase = key.replace("res_path." ,"res_path.path." ) if key.endswith(".w" ): __UpperCamelCase = rreplace(_snake_case ,".w" ,".weight" ,1 ) if key.endswith(".b" ): __UpperCamelCase = rreplace(_snake_case ,".b" ,".bias" ,1 ) __UpperCamelCase = value.float() return upgrade @torch.no_grad() def lowercase (_snake_case ,_snake_case ,_snake_case=None ,_snake_case=True ) -> List[Any]: '''simple docstring''' from dall_e import Encoder __UpperCamelCase = Encoder() if os.path.exists(_snake_case ): __UpperCamelCase = torch.load(_snake_case ) else: __UpperCamelCase = torch.hub.load_state_dict_from_url(_snake_case ) if isinstance(_snake_case ,_snake_case ): __UpperCamelCase = ckpt.state_dict() encoder.load_state_dict(_snake_case ) if config_path is not None: __UpperCamelCase = FlavaImageCodebookConfig.from_pretrained(_snake_case ) else: __UpperCamelCase = FlavaImageCodebookConfig() __UpperCamelCase = FlavaImageCodebook(_snake_case ).eval() __UpperCamelCase = encoder.state_dict() __UpperCamelCase = upgrade_state_dict(_snake_case ) hf_model.load_state_dict(_snake_case ) __UpperCamelCase = hf_model.state_dict() __UpperCamelCase = 
count_parameters(_snake_case ) __UpperCamelCase = count_parameters(_snake_case ) assert torch.allclose(_snake_case ,_snake_case ,atol=1e-3 ) if save_checkpoint: hf_model.save_pretrained(_snake_case ) else: return hf_state_dict if __name__ == "__main__": _A = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") _A = parser.parse_args() convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
228
"""simple docstring""" def lowercase (_snake_case ,_snake_case ,_snake_case ) -> float: '''simple docstring''' __UpperCamelCase = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff) # formula for sum of series return total def lowercase () -> Dict: '''simple docstring''' print(sum_of_series(1 ,1 ,10 ) ) if __name__ == "__main__": import doctest doctest.testmod()
228
1
"""simple docstring""" import sys from collections import defaultdict class a__ : def __init__( self : List[Any]) -> Optional[int]: """simple docstring""" _lowerCAmelCase:Optional[Any] = [] def __UpperCamelCase ( self : int ,a__ : List[Any]) -> Dict: """simple docstring""" return self.node_position[vertex] def __UpperCamelCase ( self : Optional[int] ,a__ : List[str] ,a__ : Optional[Any]) -> List[Any]: """simple docstring""" _lowerCAmelCase:List[str] = pos def __UpperCamelCase ( self : int ,a__ : int ,a__ : int ,a__ : Any ,a__ : str) -> Optional[Any]: """simple docstring""" if start > size // 2 - 1: return else: if 2 * start + 2 >= size: _lowerCAmelCase:int = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: _lowerCAmelCase:int = 2 * start + 1 else: _lowerCAmelCase:Any = 2 * start + 2 if heap[smallest_child] < heap[start]: _lowerCAmelCase:Tuple = heap[smallest_child], positions[smallest_child] _lowerCAmelCase:Dict = ( heap[start], positions[start], ) _lowerCAmelCase:Union[str, Any] = temp, tempa _lowerCAmelCase:Tuple = self.get_position(positions[smallest_child]) self.set_position( positions[smallest_child] ,self.get_position(positions[start])) self.set_position(positions[start] ,a__) self.top_to_bottom(a__ ,a__ ,a__ ,a__) def __UpperCamelCase ( self : str ,a__ : str ,a__ : List[str] ,a__ : Any ,a__ : Union[str, Any]) -> int: """simple docstring""" _lowerCAmelCase:List[Any] = position[index] while index != 0: _lowerCAmelCase:List[str] = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2) if val < heap[parent]: _lowerCAmelCase:Optional[Any] = heap[parent] _lowerCAmelCase:Dict = position[parent] self.set_position(position[parent] ,a__) else: _lowerCAmelCase:int = val _lowerCAmelCase:int = temp self.set_position(a__ ,a__) break _lowerCAmelCase:List[str] = parent else: _lowerCAmelCase:Dict = val _lowerCAmelCase:Optional[int] = temp self.set_position(a__ ,0) def __UpperCamelCase ( self : Optional[int] ,a__ : Dict ,a__ : Tuple) -> List[str]: 
"""simple docstring""" _lowerCAmelCase:Any = len(a__) // 2 - 1 for i in range(a__ ,-1 ,-1): self.top_to_bottom(a__ ,a__ ,len(a__) ,a__) def __UpperCamelCase ( self : List[Any] ,a__ : List[Any] ,a__ : List[Any]) -> List[str]: """simple docstring""" _lowerCAmelCase:Optional[Any] = positions[0] _lowerCAmelCase:Optional[int] = sys.maxsize self.top_to_bottom(a__ ,0 ,len(a__) ,a__) return temp def UpperCAmelCase ( snake_case : int ): _lowerCAmelCase:List[Any] = Heap() _lowerCAmelCase:List[Any] = [0] * len(_lowerCamelCase ) _lowerCAmelCase:Dict = [-1] * len(_lowerCamelCase ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph _lowerCAmelCase:List[Any] = [] # Heap of Distance of vertices from their neighboring vertex _lowerCAmelCase:str = [] for vertex in range(len(_lowerCamelCase ) ): distance_tv.append(sys.maxsize ) positions.append(_lowerCamelCase ) heap.node_position.append(_lowerCamelCase ) _lowerCAmelCase:Optional[int] = [] _lowerCAmelCase:List[str] = 1 _lowerCAmelCase:Any = sys.maxsize for neighbor, distance in adjacency_list[0]: _lowerCAmelCase:List[str] = 0 _lowerCAmelCase:List[Any] = distance heap.heapify(_lowerCamelCase , _lowerCamelCase ) for _ in range(1 , len(_lowerCamelCase ) ): _lowerCAmelCase:Tuple = heap.delete_minimum(_lowerCamelCase , _lowerCamelCase ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) _lowerCAmelCase:Any = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(_lowerCamelCase )] ): _lowerCAmelCase:Tuple = distance heap.bottom_to_top( _lowerCamelCase , heap.get_position(_lowerCamelCase ) , _lowerCamelCase , _lowerCamelCase ) _lowerCAmelCase:int = vertex return tree_edges if __name__ == "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > UpperCamelCase__ = int(input('''Enter number of edges: ''').strip()) UpperCamelCase__ = defaultdict(list) 
for _ in range(edges_number): UpperCamelCase__ = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
227
"""Tests for the datasets DownloadManager (download, extract, iter helpers)."""
import json
import os
from pathlib import Path

import pytest

from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename


URL = "http://www.mocksite.com/file1.txt"
CONTENT = "\"text\": [\"foo\", \"foo\"]"
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"


class MockResponse:
    """Minimal stand-in for ``requests``' response object."""

    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    # BUG FIX vs. the previous revision: the mock class existed under a
    # different name, every test shared the name `_a`, and the star-arg
    # parameters were duplicated (a SyntaxError).
    return MockResponse()


@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
    """Download str/list/dict url specs and verify paths, content, metadata."""
    import requests

    monkeypatch.setattr(requests, "request", mock_request)

    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            # Each download gets a sibling .json metadata file.
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}


@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_download_manager_extract(paths_type, xz_file, text_file):
    """Extract str/list/dict path specs and verify locations and content."""
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content


def _test_jsonl(path, file):
    """Shared check: a 4-record jsonl member with the expected columns."""
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4


@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2


@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2


def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
26
0
import numpy as np

# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models

if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential - building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu"))

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])

    # Part 2 - Fitting the CNN to the images

    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')

    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )

    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)

    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    classifier.fit_generator(training_set, steps_per_epoch=5, epochs=30, validation_data=test_set)

    classifier.save("cnn.h5")

    # Part 3 - Making new predictions

    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
651
"""Tests for the `check_copies` repo utility (is_copy_consistent / find_code_in_diffusers)."""
import os
import re
import shutil
import sys
import tempfile
import unittest

import black


# Repository root (three directory levels above this test file); `utils` is
# added to sys.path so the `check_copies` script can be imported.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_copies  # noqa: E402

# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"


class A(unittest.TestCase):
    """Copy-consistency checks run against a scratch copy of scheduling_ddpm.py.

    NOTE(review): the original file gave all five methods the same obfuscated
    name, so setUp/tearDown were shadowed and unittest discovered no tests;
    restoring the conventional names is the core fix.
    """

    def setUp(self):
        # Work inside a throwaway directory so tests may freely rewrite files.
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        # Remove the temporary working directory created in setUp.
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        """Write `class_code` under `comment` to a temp module and assert the
        checker accepts it (or, when `overwrite_result` is given, rewrites the
        file to that expected content)."""
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                # Was `assertTrue(f.read(), expected)`, which treated the
                # expected value as a message and never compared anything.
                self.assertEqual(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_copy_consistency(self):
        # Base copy consistency.
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end.
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename.
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name.
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            # NOTE(review): "Bert" never occurs in REFERENCE_CODE, so this sub
            # is a no-op; kept as-is to preserve the original behavior.
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite.
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
651
1
"""Tests for the DeBERTa tokenizers (slow and fast)."""
import json
import os
import unittest

from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class _a(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer test-suite for DeBERTa.

    TokenizerTesterMixin supplies the generic tests and reads the three class
    attributes below.  NOTE(review): the original file collapsed all three
    attributes and every method onto single obfuscated names, so the mixin
    and unittest discovery were broken; the conventional names are restored.
    """

    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self):
        super().setUp()

        # Minimal BPE vocab/merges, adapted from Sennrich et al. 2015 and
        # https://github.com/rsennrich/subword-nmt ("\u0120" is the GPT-2
        # style leading-space marker).
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "[UNK]",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        """Build a slow tokenizer from the temporary vocab written in setUp."""
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """Input/expected-output pair used by the mixin's round-trip tests."""
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        # 0s for the first segment (incl. special tokens), 1s for the second.
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # Padded-batch golden values for microsoft/deberta-base (length 45).
            expected_encoding = {
                "input_ids": [
                    [1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2] + [0] * 25,
                    [1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2] + [0] * 35,
                    [1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2],
                ],
                "token_type_ids": [[0] * 45, [0] * 45, [0] * 45],
                "attention_mask": [
                    [1] * 20 + [0] * 25,
                    [1] * 10 + [0] * 35,
                    [1] * 45,
                ],
            }

            expected_decoded_sequence = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
591
"""CvT (Convolutional vision Transformer) model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class _a(PretrainedConfig):
    """Configuration for a CvT model.

    CvT is a three-stage architecture; list-valued arguments carry one value
    per stage.  NOTE(review): the original signature named every parameter
    `A__` (a SyntaxError from obfuscation) and assigned to a throwaway local
    instead of `self`; the parameter names are recovered from the assignment
    right-hand sides.
    """

    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        # List defaults are kept (rather than None-sentinels) to stay
        # interface-compatible with existing callers.
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
591
1
# Dummy placeholder objects for the `flax` backend: any use raises an
# ImportError telling the user to install flax (via the DummyObject metaclass
# and requires_backends).
from ..utils import DummyObject, requires_backends


class _UpperCamelCase(metaclass=DummyObject):
    """Raised-on-use stand-in exposed when `flax` is not installed.

    NOTE(review): the original file was obfuscation-damaged in three ways that
    are fixed here: (1) every method used `*snake_case_, **snake_case_`
    (duplicate argument name — a SyntaxError); (2) the metaclass and the
    parameter/attribute annotations referenced undefined names (`_A`,
    `Optional`, `Union`, ... — NameError on import); (3) thirteen identical
    classes were bound to this same name in a row, so the first twelve were
    dead code and have been removed.
    """

    # Backends this dummy stands in for; read by the DummyObject metaclass.
    __UpperCamelCase = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        # presumably the standard dummy-object factory pair — any other
        # attribute access still raises via the metaclass, so the effective
        # interface is unchanged.
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
670
"""Lazy import structure for the RoFormer model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Submodule name -> public names, consumed lazily by _LazyModule below.
# NOTE(review): the original assigned every fragment to a throwaway name
# while _LazyModule read an undefined `_import_structure`; restored here.
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roformer"] = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
            ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoFormerForCausalLM,
            RoFormerForMaskedLM,
            RoFormerForMultipleChoice,
            RoFormerForQuestionAnswering,
            RoFormerForSequenceClassification,
            RoFormerForTokenClassification,
            RoFormerLayer,
            RoFormerModel,
            RoFormerPreTrainedModel,
            load_tf_weights_in_roformer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
            TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerLayer,
            TFRoFormerModel,
            TFRoFormerPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
            FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerModel,
            FlaxRoFormerPreTrainedModel,
        )

else:
    import sys

    # Install the lazy module so attribute access triggers the real imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
670
1
"""PyTorch ResNet model (embeddings, encoder, base model, classifier head, backbone).

Obfuscation fixes: restored the real class names referenced at the call sites
(`ResNetConvLayer`, `ResNetShortCut`, ...), the real torch API names
(`nn.Conv2d`, `nn.BatchNorm2d`, `nn.MaxPool2d`, `nn.AdaptiveAvgPool2d`,
`ACT2FN`), and `forward` as the nn.Module entry point so `__call__` dispatches.
"""
from typing import Optional

import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import (
    BackboneOutput,
    BaseModelOutputWithNoAttention,
    BaseModelOutputWithPoolingAndNoAttention,
    ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]


class ResNetConvLayer(nn.Module):
    """Conv2d -> BatchNorm2d -> activation: the basic ResNet building brick."""

    def __init__(
        self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"
    ):
        super().__init__()
        # `padding=kernel_size // 2` keeps the spatial size for stride 1 ("same" padding).
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetEmbeddings(nn.Module):
    """ResNet stem: 7x7 stride-2 convolution followed by 3x3 stride-2 max pooling."""

    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding


class ResNetShortCut(nn.Module):
    """1x1 projection shortcut: matches residual channels/stride to the main branch."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class ResNetBasicLayer(nn.Module):
    """Classic residual layer: two 3x3 convolutions plus an (optional) projection shortcut."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        # A projection is only needed when shape changes (channels or stride).
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            # Last conv has no activation: it is applied after the residual add.
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetBottleNeckLayer(nn.Module):
    """Bottleneck residual layer: 1x1 reduce -> 3x3 -> 1x1 expand (by `reduction`)."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetStage(nn.Module):
    """A stack of `depth` residual layers; downsampling happens in the first layer."""

    def __init__(
        self,
        config: ResNetConfig,
        in_channels: int,
        out_channels: int,
        stride: int = 2,
        depth: int = 2,
    ):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state


class ResNetEncoder(nn.Module):
    """All ResNet stages, optionally collecting per-stage hidden states."""

    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )


class ResNetPreTrainedModel(PreTrainedModel):
    """Base class handling weight initialization and pretrained-model loading."""

    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        # He initialization for convolutions, constant init for norm layers.
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value


RESNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a
            config file does not load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

RESNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )


@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in
            `[0, ..., config.num_labels - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            # Infer the problem type once from label dtype / label count.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)


@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BackboneOutput:
        """
        Returns:
            The feature maps of the stages requested via `config.out_features`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)

        # Always collect hidden states internally so the requested stages can be selected.
        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
2
"""Shared tests for sequence feature extractors (padding, truncation, tensors).

Obfuscation fixes: every method was named the same garbled identifier (so
later defs shadowed earlier ones and pytest could not collect them), the
property consumed elsewhere as ``self.feat_extract_dict`` was misnamed, the
class attributes referenced as ``feat_extract_tester`` / ``feature_extraction_class``
were anonymous, literal arguments had been replaced by the undefined name
``a_``, and ``np.floataa`` is not a dtype (``np.float32``).
"""
import numpy as np

from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch

from .test_feature_extraction_common import FeatureExtractionSavingTestMixin


class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
    # to overwrite at feature extractor specific tests
    feat_extract_tester = None
    feature_extraction_class = None

    @property
    def feat_extract_dict(self):
        """Keyword args used to build the feature extractor under test."""
        return self.feat_extract_tester.prepare_feat_extract_dict()

    def test_feat_extract_common_properties(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feat_extract, "feature_size"))
        self.assertTrue(hasattr(feat_extract, "sampling_rate"))
        self.assertTrue(hasattr(feat_extract, "padding_value"))

    def test_batch_feature(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )

    @require_torch
    def test_batch_feature_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )

    @require_tf
    def test_batch_feature_tf(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="tf")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )

    def _check_padding(self, numpify=False):
        """Exercise `pad` over list / numpy inputs for every padding strategy."""

        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False
            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        pad_diff = self.feat_extract_tester.seq_length_diff
        pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
        pad_min_length = self.feat_extract_tester.min_seq_length
        batch_size = self.feat_extract_tester.batch_size
        feature_size = self.feat_extract_tester.feature_size

        # test padding for List[int] + numpy
        input_1 = feat_extract.pad(processed_features, padding=False)
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="longest")
        input_2 = input_2[input_name]

        input_3 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[-1]))
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(processed_features, padding="longest", return_tensors="np")
        input_4 = input_4[input_name]

        # max_length parameter has to be provided when setting `padding="max_length"`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length")[input_name]

        input_5 = feat_extract.pad(
            processed_features, padding="max_length", max_length=pad_max_length, return_tensors="np"
        )
        input_5 = input_5[input_name]

        self.assertFalse(_inputs_have_equal_length(input_1))
        self.assertTrue(_inputs_have_equal_length(input_2))
        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(_inputs_are_equal(input_2, input_3))
        self.assertTrue(len(input_1[0]) == pad_min_length)
        self.assertTrue(len(input_1[1]) == pad_min_length + pad_diff)
        self.assertTrue(input_4.shape[:2] == (batch_size, len(input_3[0])))
        self.assertTrue(input_5.shape[:2] == (batch_size, pad_max_length))

        if feature_size > 1:
            self.assertTrue(input_4.shape[2] == input_5.shape[2] == feature_size)

        # test padding for `pad_to_multiple_of` for List[int] + numpy
        input_6 = feat_extract.pad(processed_features, pad_to_multiple_of=10)
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(processed_features, padding="longest", pad_to_multiple_of=10)
        input_7 = input_7[input_name]

        input_8 = feat_extract.pad(
            processed_features, padding="max_length", pad_to_multiple_of=10, max_length=pad_max_length
        )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features,
            padding="max_length",
            pad_to_multiple_of=10,
            max_length=pad_max_length,
            return_tensors="np",
        )
        input_9 = input_9[input_name]

        self.assertTrue(all(len(x) % 10 == 0 for x in input_6))
        self.assertTrue(_inputs_are_equal(input_6, input_7))

        expected_mult_pad_length = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
        self.assertTrue(all(len(x) == expected_mult_pad_length for x in input_8))
        self.assertEqual(input_9.shape[:2], (batch_size, expected_mult_pad_length))

        if feature_size > 1:
            self.assertTrue(input_9.shape[2] == feature_size)

        # Check padding value is correct
        padding_vector_sum = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
        self.assertTrue(
            abs(
                np.asarray(input_2[0])[pad_min_length:].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_2[1])[pad_min_length + pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_2[2])[pad_min_length + 2 * pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(input_5[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3
        )
        self.assertTrue(
            abs(input_9[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
            < 1e-3
        )

    def _check_truncation(self, numpify=False):
        """Exercise `pad(..., truncation=True)` over list / numpy inputs."""

        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False
            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        # truncate to smallest
        input_1 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), truncation=True
        )
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[0]))
        input_2 = input_2[input_name]

        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertFalse(_inputs_have_equal_length(input_2))

        # truncate to smallest with np
        input_3 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            return_tensors="np",
            truncation=True,
        )
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np"
        )
        input_4 = input_4[input_name]

        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(input_3.shape[1] == len(speech_inputs[0]))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_4))

        # truncate to middle
        input_5 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[1]),
            truncation=True,
            return_tensors="np",
        )
        input_5 = input_5[input_name]

        input_6 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True
        )
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), return_tensors="np"
        )
        input_7 = input_7[input_name]

        self.assertTrue(input_5.shape[1] == len(speech_inputs[1]))
        self.assertTrue(_inputs_have_equal_length(input_5))
        self.assertTrue(_inputs_have_equal_length(input_6))
        self.assertTrue(_inputs_are_equal(input_5, input_6))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_7))
        self.assertTrue(len(input_7[-1]) == len(speech_inputs[-1]))

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length", truncation=True)[input_name]

        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        pad_to_multiple_of = 12
        input_8 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
            truncation=True,
        )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
        )
        input_9 = input_9[input_name]

        # retrieve expected_length as multiple of pad_to_multiple_of
        expected_length = len(speech_inputs[0])
        if expected_length % pad_to_multiple_of != 0:
            expected_length = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of

        self.assertTrue(len(input_8[0]) == expected_length)
        self.assertTrue(_inputs_have_equal_length(input_8))
        self.assertFalse(_inputs_have_equal_length(input_9))

    def test_padding_from_list(self):
        self._check_padding(numpify=False)

    def test_padding_from_array(self):
        self._check_padding(numpify=True)

    def test_truncation_from_list(self):
        self._check_truncation(numpify=False)

    def test_truncation_from_array(self):
        self._check_truncation(numpify=True)

    @require_torch
    def test_padding_accepts_tensors_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)

    @require_tf
    def test_padding_accepts_tensors_tf(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_tf = feat_extract.pad(processed_features, padding="longest", return_tensors="tf")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2)

    def test_attention_mask(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
642
0
"""A minimal row-major 2-D matrix with +, -, *, transpose and the
Sherman-Morrison rank-1 inverse update.

Obfuscation fixes: `__init__` (and other methods) declared duplicate parameter
names (a SyntaxError), bodies referenced the undefined name `__A`, tuple
assignments were collapsed onto single junk names, and the demo entry point
called an undefined `testa()`.
"""
from typing import Any


class Matrix:
    """A `row` x `column` matrix stored as a list of row lists."""

    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        """Create a matrix of the given size, filled with `default_value`."""
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Widest printed element decides the column width for alignment.
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc: tuple) -> bool:
        """Return True when `loc` is a (row, col) pair inside the matrix bounds."""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        if not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        return True

    def __getitem__(self, loc: tuple) -> Any:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple, value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: "Matrix") -> "Matrix":
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> "Matrix":
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: "Matrix") -> "Matrix":
        return self + (-another)

    def __mul__(self, another) -> "Matrix":
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> "Matrix":
        """Return a new matrix with rows and columns swapped."""
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: "Matrix", v: "Matrix") -> "Matrix":
        """Return (A + u v^T)^(-1) given that `self` is A^(-1).

        Uses the Sherman-Morrison formula; returns None when the update is
        singular (1 + v^T A^(-1) u == 0).
        """
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1) to be tested with Sherman-Morrison: start from the identity.
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
718
"""simple docstring""" import gc import math import unittest import torch from diffusers import UNetaDModel from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin __A = logging.get_logger(__name__) enable_full_determinism() class _lowerCAmelCase ( a , a , unittest.TestCase ): """simple docstring""" __magic_name__ :str = UNetaDModel __magic_name__ :Tuple = """sample""" @property def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = 4 lowerCAmelCase__ :Dict = 3 lowerCAmelCase__ :int = (3_2, 3_2) lowerCAmelCase__ :List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCAmelCase ) lowerCAmelCase__ :Any = torch.tensor([1_0] ).to(__UpperCAmelCase ) return {"sample": noise, "timestep": time_step} @property def snake_case ( self ): '''simple docstring''' return (3, 3_2, 3_2) @property def snake_case ( self ): '''simple docstring''' return (3, 3_2, 3_2) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :int = { 'block_out_channels': (3_2, 6_4), 'down_block_types': ('DownBlock2D', 'AttnDownBlock2D'), 'up_block_types': ('AttnUpBlock2D', 'UpBlock2D'), 'attention_head_dim': 3, 'out_channels': 3, 'in_channels': 3, 'layers_per_block': 2, 'sample_size': 3_2, } lowerCAmelCase__ :int = self.dummy_input return init_dict, inputs_dict class _lowerCAmelCase ( a , a , unittest.TestCase ): """simple docstring""" __magic_name__ :str = UNetaDModel __magic_name__ :List[str] = """sample""" @property def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = 4 lowerCAmelCase__ :List[Any] = 4 lowerCAmelCase__ :str = (3_2, 3_2) lowerCAmelCase__ :Optional[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = torch.tensor([1_0] ).to(__UpperCAmelCase ) return {"sample": noise, "timestep": time_step} 
@property def snake_case ( self ): '''simple docstring''' return (4, 3_2, 3_2) @property def snake_case ( self ): '''simple docstring''' return (4, 3_2, 3_2) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = { 'sample_size': 3_2, 'in_channels': 4, 'out_channels': 4, 'layers_per_block': 2, 'block_out_channels': (3_2, 6_4), 'attention_head_dim': 3_2, 'down_block_types': ('DownBlock2D', 'DownBlock2D'), 'up_block_types': ('UpBlock2D', 'UpBlock2D'), } lowerCAmelCase__ :Dict = self.dummy_input return init_dict, inputs_dict def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ :Any = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) self.assertEqual(len(loading_info['missing_keys'] ) , 0 ) model.to(__UpperCAmelCase ) lowerCAmelCase__ :int = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=__UpperCAmelCase ) model.to(__UpperCAmelCase ) lowerCAmelCase__ :List[str] = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ :Any = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=__UpperCAmelCase ) model_accelerate.to(__UpperCAmelCase ) model_accelerate.eval() lowerCAmelCase__ :List[str] = torch.randn( 1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , ) lowerCAmelCase__ :List[str] = 
noise.to(__UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = torch.tensor([1_0] * noise.shape[0] ).to(__UpperCAmelCase ) lowerCAmelCase__ :List[str] = model_accelerate(__UpperCAmelCase , __UpperCAmelCase )['sample'] # two models don't need to stay in the device at the same time del model_accelerate torch.cuda.empty_cache() gc.collect() lowerCAmelCase__ , lowerCAmelCase__ :Union[str, Any] = UNetaDModel.from_pretrained( 'fusing/unet-ldm-dummy-update' , output_loading_info=__UpperCAmelCase , low_cpu_mem_usage=__UpperCAmelCase ) model_normal_load.to(__UpperCAmelCase ) model_normal_load.eval() lowerCAmelCase__ :Optional[int] = model_normal_load(__UpperCAmelCase , __UpperCAmelCase )['sample'] assert torch_all_close(__UpperCAmelCase , __UpperCAmelCase , rtol=1E-3 ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' ) model.eval() model.to(__UpperCAmelCase ) lowerCAmelCase__ :Tuple = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) lowerCAmelCase__ :int = noise.to(__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = torch.tensor([1_0] * noise.shape[0] ).to(__UpperCAmelCase ) with torch.no_grad(): lowerCAmelCase__ :Tuple = model(__UpperCAmelCase , __UpperCAmelCase ).sample lowerCAmelCase__ :List[Any] = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off lowerCAmelCase__ :Tuple = torch.tensor([-13.32_58, -20.11_00, -15.98_73, -17.66_17, -23.05_96, -17.94_19, -13.36_75, -16.18_89, -12.38_00] ) # fmt: on self.assertTrue(torch_all_close(__UpperCAmelCase , __UpperCAmelCase , rtol=1E-3 ) ) class _lowerCAmelCase ( a , a , unittest.TestCase ): """simple docstring""" __magic_name__ :Optional[int] = UNetaDModel __magic_name__ :Optional[int] = """sample""" @property def snake_case ( self , __UpperCAmelCase=(3_2, 3_2) ): '''simple docstring''' lowerCAmelCase__ :Dict = 4 lowerCAmelCase__ :int = 3 lowerCAmelCase__ 
:Optional[int] = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = torch.tensor(batch_size * [1_0] ).to(dtype=torch.intaa , device=__UpperCAmelCase ) return {"sample": noise, "timestep": time_step} @property def snake_case ( self ): '''simple docstring''' return (3, 3_2, 3_2) @property def snake_case ( self ): '''simple docstring''' return (3, 3_2, 3_2) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = { 'block_out_channels': [3_2, 6_4, 6_4, 6_4], 'in_channels': 3, 'layers_per_block': 1, 'out_channels': 3, 'time_embedding_type': 'fourier', 'norm_eps': 1E-6, 'mid_block_scale_factor': math.sqrt(2.0 ), 'norm_num_groups': None, 'down_block_types': [ 'SkipDownBlock2D', 'AttnSkipDownBlock2D', 'SkipDownBlock2D', 'SkipDownBlock2D', ], 'up_block_types': [ 'SkipUpBlock2D', 'SkipUpBlock2D', 'AttnSkipUpBlock2D', 'SkipUpBlock2D', ], } lowerCAmelCase__ :Any = self.dummy_input return init_dict, inputs_dict @slow def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ :List[str] = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' , output_loading_info=__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) self.assertEqual(len(loading_info['missing_keys'] ) , 0 ) model.to(__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = self.dummy_input lowerCAmelCase__ :Union[str, Any] = floats_tensor((4, 3) + (2_5_6, 2_5_6) ).to(__UpperCAmelCase ) lowerCAmelCase__ :Tuple = noise lowerCAmelCase__ :List[Any] = model(**__UpperCAmelCase ) assert image is not None, "Make sure output is not None" @slow def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Dict = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' ) model.to(__UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = 4 lowerCAmelCase__ :Any = 3 lowerCAmelCase__ :Dict = (2_5_6, 2_5_6) lowerCAmelCase__ :int = torch.ones((batch_size, num_channels) + sizes ).to(__UpperCAmelCase ) lowerCAmelCase__ 
:Any = torch.tensor(batch_size * [1E-4] ).to(__UpperCAmelCase ) with torch.no_grad(): lowerCAmelCase__ :str = model(__UpperCAmelCase , __UpperCAmelCase ).sample lowerCAmelCase__ :Tuple = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off lowerCAmelCase__ :int = torch.tensor([-48_42.86_91, -64_99.66_31, -38_00.19_53, -79_78.26_86, -1_09_80.71_29, -2_00_28.85_35, 81_48.28_22, 23_42.29_05, 5_67.76_08] ) # fmt: on self.assertTrue(torch_all_close(__UpperCAmelCase , __UpperCAmelCase , rtol=1E-2 ) ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :int = UNetaDModel.from_pretrained('fusing/ncsnpp-ffhq-ve-dummy-update' ) model.to(__UpperCAmelCase ) lowerCAmelCase__ :Tuple = 4 lowerCAmelCase__ :List[Any] = 3 lowerCAmelCase__ :Dict = (3_2, 3_2) lowerCAmelCase__ :Optional[Any] = torch.ones((batch_size, num_channels) + sizes ).to(__UpperCAmelCase ) lowerCAmelCase__ :List[str] = torch.tensor(batch_size * [1E-4] ).to(__UpperCAmelCase ) with torch.no_grad(): lowerCAmelCase__ :Optional[Any] = model(__UpperCAmelCase , __UpperCAmelCase ).sample lowerCAmelCase__ :Any = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off lowerCAmelCase__ :Any = torch.tensor([-0.03_25, -0.09_00, -0.08_69, -0.03_32, -0.07_25, -0.02_70, -0.01_01, 0.02_27, 0.02_56] ) # fmt: on self.assertTrue(torch_all_close(__UpperCAmelCase , __UpperCAmelCase , rtol=1E-2 ) ) def snake_case ( self ): '''simple docstring''' pass
560
0
"""Import shim for the MRA model.

The configuration is always importable; the torch modeling classes are only
registered when torch is available.  At runtime the module replaces itself
with a ``_LazyModule`` proxy so heavy submodules are imported on first
attribute access rather than at package-import time.
"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Mapping of submodule name -> public names it provides.  The configuration
# module has no optional dependencies, so it is always listed.
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: expose only the configuration.
    pass
else:
    # Fix: the original assigned this list to a throwaway name instead of
    # registering it under "modeling_mra", so the modeling classes were
    # never exported; the referenced `_import_structure` did not even exist.
    _import_structure["modeling_mra"] = [
        "MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MraForMaskedLM",
        "MraForMultipleChoice",
        "MraForQuestionAnswering",
        "MraForSequenceClassification",
        "MraForTokenClassification",
        "MraLayer",
        "MraModel",
        "MraPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )
else:
    import sys

    # Replace this module object with a lazy proxy; submodules are imported
    # on first attribute access (standard transformers pattern).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
44
"""Conway's Game of Life, rendered frame-by-frame into an animated GIF.

``new_generation`` is pure Python; ``generate_images`` is the only function
that needs Pillow, so Pillow is imported lazily inside it.
"""
from __future__ import annotations

# Define glider example
GLIDER = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]

# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]


def new_generation(cells: list[list[int]]) -> list[list[int]]:
    """Return the next generation for *cells* (0 = dead, 1 = alive).

    Cells outside the grid are treated as permanently dead.
    """
    rows = len(cells)
    next_generation = []
    for i in range(rows):
        next_row = []
        for j in range(len(cells[i])):
            # Count live cells among the (up to) eight neighbours.
            neighbour_count = 0
            for di in (-1, 0, 1):
                for dj in (-1, 0, 1):
                    if di == 0 and dj == 0:
                        continue  # a cell is not its own neighbour
                    ni, nj = i + di, j + dj
                    if 0 <= ni < rows and 0 <= nj < len(cells[ni]):
                        neighbour_count += cells[ni][nj]
            # Rules of the Game of Life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other cells are dead in the next generation.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (
                not alive and neighbour_count == 3
            ):
                next_row.append(1)
            else:
                next_row.append(0)
        next_generation.append(next_row)
    return next_generation


def generate_images(cells: list[list[int]], frames: int) -> list:
    """Render *frames* successive generations, one pixel per cell.

    White pixels are dead cells, black pixels are live cells.  Pillow is
    imported lazily so the simulation itself has no hard dependency on it.
    NOTE(review): the pixel loop indexes ``cells[y][x]`` with ``x`` ranging
    over rows, so it assumes a square grid — true for GLIDER and BLINKER.
    """
    from PIL import Image

    images = []
    for _ in range(frames):
        # Create output image (one pixel per cell).
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()
        # Save cells to image.  Fix: the obfuscated original collapsed the
        # pixel write into a dead assignment, leaving every frame black.
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        images.append(img)
        cells = new_generation(cells)
    return images


if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
    images[0].save("out.gif", save_all=True, append_images=images[1:])
585
0
"""Batch gradient descent for a linear hypothesis function.

The hypothesis is h(x) = p0 + p1*x1 + p2*x2 + p3*x3, with the parameters
held in the module-level ``parameter_vector`` (index 0 is the bias).
"""
import numpy

# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]  # [bias, w1, w2, w3]
m = len(train_data)  # number of training examples
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    """Return hypothesis(example) - actual_output(example)."""
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    """Evaluate the linear hypothesis on one input tuple."""
    hyp_val = 0
    # parameter_vector[0] is the bias; the remaining entries are weights.
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    """Actual output of the given example from the "train" or "test" set."""
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    """Hypothesis value for the given example from "train" or "test"."""
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    """Sum of error terms over the first *end* training examples.

    ``index == -1`` corresponds to the bias parameter (no feature factor).
    """
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    """Mean cost derivative for parameter *index* (-1 == bias)."""
    return summation_of_cost_derivative(index, m) / m


def run_gradient_descent():
    """Iterate simultaneous parameter updates until convergence."""
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output.
    absolute_error_limit = 0.000_002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            # Fix: the obfuscated original discarded the update instead of
            # storing it, so the loop could never converge.
            temp_parameter_vector[i] = parameter_vector[i] - LEARNING_RATE * cost_derivative
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    """Print actual vs. predicted outputs for the test set."""
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
716
"""Convert an original-repo ViT-MAE checkpoint to HuggingFace format.

Usage:
    python convert_vit_mae_to_pytorch.py --checkpoint_url URL \
        --pytorch_dump_folder_path DIR
"""
import argparse

import torch


def rename_key(name):
    """Map one original-repo parameter name onto the HF ViTMAE name.

    Fix: the obfuscated original assigned every ``replace`` result to a
    throwaway variable and returned the input unchanged; the replacements
    must be folded back into ``name``.
    """
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")
    return name


def convert_state_dict(orig_state_dict, config):
    """Rewrite *orig_state_dict* to HF naming, splitting fused qkv weights.

    The original repo stores query/key/value as one fused ``qkv`` tensor
    laid out [q; k; v] along the first axis; HF ViTMAE keeps three separate
    projections.  All other keys are renamed via ``rename_key``.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
            if "weight" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict


def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Download, convert, sanity-check against known logits, and save.

    Heavy/optional dependencies are imported lazily so the pure renaming
    helpers above stay importable without them.
    """
    import requests
    from PIL import Image
    from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor

    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    new_state_dict = convert_state_dict(state_dict, config)
    model.load_state_dict(new_state_dict)
    model.eval()

    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass (seeded: ViT-MAE's random masking must be reproducible
    # for the logits comparison below)
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]]
        )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]]
        )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]]
        )

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
199
0
import json import os import unittest from transformers.models.roc_bert.tokenization_roc_bert import ( VOCAB_FILES_NAMES, RoCBertBasicTokenizer, RoCBertTokenizer, RoCBertWordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class lowerCamelCase_ ( lowercase , unittest.TestCase ): __lowercase : Optional[int] = RoCBertTokenizer __lowercase : List[str] = None __lowercase : int = False __lowercase : Optional[Any] = True __lowercase : int = filter_non_english def lowercase ( self ) -> Union[str, Any]: """simple docstring""" super().setUp() _UpperCamelCase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"] _UpperCamelCase = {} _UpperCamelCase = {} for i, value in enumerate(lowerCamelCase_ ): _UpperCamelCase = i _UpperCamelCase = i _UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) _UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_shape_file"] ) _UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_pronunciation_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) with open(self.word_shape_file , "w" , encoding="utf-8" ) as word_shape_writer: json.dump(lowerCamelCase_ , lowerCamelCase_ , ensure_ascii=lowerCamelCase_ ) with open(self.word_pronunciation_file , "w" , encoding="utf-8" ) as word_pronunciation_writer: json.dump(lowerCamelCase_ , lowerCamelCase_ , ensure_ascii=lowerCamelCase_ ) def lowercase ( self ) -> int: """simple docstring""" _UpperCamelCase = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file ) _UpperCamelCase = tokenizer.tokenize("你好[SEP]你是谁" ) self.assertListEqual(lowerCamelCase_ , ["你", "好", "[SEP]", "你", "是", "谁"] ) 
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [5, 6, 2, 5, 7, 8] ) self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(lowerCamelCase_ ) , [5, 6, 2, 5, 7, 8] ) self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(lowerCamelCase_ ) , [5, 6, 2, 5, 7, 8] ) def lowercase ( self ) -> str: """simple docstring""" _UpperCamelCase = RoCBertBasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] ) def lowercase ( self ) -> Optional[int]: """simple docstring""" _UpperCamelCase = RoCBertBasicTokenizer(do_lower_case=lowerCamelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def lowercase ( self ) -> List[str]: """simple docstring""" _UpperCamelCase = RoCBertBasicTokenizer(do_lower_case=lowerCamelCase_ , strip_accents=lowerCamelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] ) def lowercase ( self ) -> List[Any]: """simple docstring""" _UpperCamelCase = RoCBertBasicTokenizer(do_lower_case=lowerCamelCase_ , strip_accents=lowerCamelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def lowercase ( self ) -> Any: """simple docstring""" _UpperCamelCase = RoCBertBasicTokenizer(do_lower_case=lowerCamelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? 
" ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def lowercase ( self ) -> Optional[int]: """simple docstring""" _UpperCamelCase = RoCBertBasicTokenizer(do_lower_case=lowerCamelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def lowercase ( self ) -> Union[str, Any]: """simple docstring""" _UpperCamelCase = RoCBertBasicTokenizer(do_lower_case=lowerCamelCase_ , strip_accents=lowerCamelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] ) def lowercase ( self ) -> Optional[int]: """simple docstring""" _UpperCamelCase = RoCBertBasicTokenizer(do_lower_case=lowerCamelCase_ , strip_accents=lowerCamelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] ) def lowercase ( self ) -> Union[str, Any]: """simple docstring""" _UpperCamelCase = RoCBertBasicTokenizer(do_lower_case=lowerCamelCase_ , never_split=["[UNK]"] ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? 
[UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] ) def lowercase ( self ) -> List[str]: """simple docstring""" _UpperCamelCase = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] _UpperCamelCase = {} for i, token in enumerate(lowerCamelCase_ ): _UpperCamelCase = i _UpperCamelCase = RoCBertWordpieceTokenizer(vocab=lowerCamelCase_ , unk_token="[UNK]" ) self.assertListEqual(tokenizer.tokenize("" ) , [] ) self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] ) self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] ) def lowercase ( self ) -> List[str]: """simple docstring""" self.assertTrue(_is_whitespace(" " ) ) self.assertTrue(_is_whitespace("\t" ) ) self.assertTrue(_is_whitespace("\r" ) ) self.assertTrue(_is_whitespace("\n" ) ) self.assertTrue(_is_whitespace("\u00A0" ) ) self.assertFalse(_is_whitespace("A" ) ) self.assertFalse(_is_whitespace("-" ) ) def lowercase ( self ) -> int: """simple docstring""" self.assertTrue(_is_control("\u0005" ) ) self.assertFalse(_is_control("A" ) ) self.assertFalse(_is_control(" " ) ) self.assertFalse(_is_control("\t" ) ) self.assertFalse(_is_control("\r" ) ) def lowercase ( self ) -> Optional[Any]: """simple docstring""" self.assertTrue(_is_punctuation("-" ) ) self.assertTrue(_is_punctuation("$" ) ) self.assertTrue(_is_punctuation("`" ) ) self.assertTrue(_is_punctuation("." 
) ) self.assertFalse(_is_punctuation("A" ) ) self.assertFalse(_is_punctuation(" " ) ) def lowercase ( self ) -> Optional[Any]: """simple docstring""" _UpperCamelCase = self.get_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(lowerCamelCase_ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) if self.test_rust_tokenizer: _UpperCamelCase = self.get_rust_tokenizer() self.assertListEqual( [rust_tokenizer.tokenize(lowerCamelCase_ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) def lowercase ( self ) -> str: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _UpperCamelCase = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ ) _UpperCamelCase = f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.''' _UpperCamelCase = tokenizer_r.encode_plus( lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ , return_offsets_mapping=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , ) _UpperCamelCase = tokenizer_r.do_lower_case if hasattr(lowerCamelCase_ , "do_lower_case" ) else False _UpperCamelCase = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "A"), ((1, 2), ","), ((3, 5), "na"), ((5, 6), "##ï"), ((6, 8), "##ve"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "Allen"), ((21, 23), "##NL"), ((23, 24), "##P"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "a"), ((1, 2), ","), ((3, 8), "naive"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "allen"), ((21, 23), "##nl"), ((23, 24), "##p"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) ) 
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] ) def lowercase ( self ) -> Dict: """simple docstring""" _UpperCamelCase = ["的", "人", "有"] _UpperCamelCase = "".join(lowerCamelCase_ ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _UpperCamelCase = True _UpperCamelCase = self.tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ ) _UpperCamelCase = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ ) _UpperCamelCase = tokenizer_p.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) _UpperCamelCase = tokenizer_r.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) _UpperCamelCase = tokenizer_r.convert_ids_to_tokens(lowerCamelCase_ ) _UpperCamelCase = tokenizer_p.convert_ids_to_tokens(lowerCamelCase_ ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ ) self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ ) _UpperCamelCase = False _UpperCamelCase = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ ) _UpperCamelCase = self.tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ ) _UpperCamelCase = tokenizer_r.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) _UpperCamelCase = tokenizer_p.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) _UpperCamelCase = tokenizer_r.convert_ids_to_tokens(lowerCamelCase_ ) _UpperCamelCase = tokenizer_p.convert_ids_to_tokens(lowerCamelCase_ ) # it is expected that only the first Chinese character is not preceded by "##". 
_UpperCamelCase = [ f'''##{token}''' if idx != 0 else token for idx, token in enumerate(lowerCamelCase_ ) ] self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ ) self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ ) @slow def lowercase ( self ) -> Dict: """simple docstring""" _UpperCamelCase = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file ) _UpperCamelCase = tokenizer.encode("你好" , add_special_tokens=lowerCamelCase_ ) _UpperCamelCase = tokenizer.encode("你是谁" , add_special_tokens=lowerCamelCase_ ) _UpperCamelCase = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ ) _UpperCamelCase = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ , lowerCamelCase_ ) assert encoded_sentence == [1] + text + [2] assert encoded_pair == [1] + text + [2] + text_a + [2] def lowercase ( self ) -> Optional[int]: """simple docstring""" _UpperCamelCase = self.get_tokenizers(do_lower_case=lowerCamelCase_ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): _UpperCamelCase = "你好,你是谁" _UpperCamelCase = tokenizer.tokenize(lowerCamelCase_ ) _UpperCamelCase = tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) _UpperCamelCase = tokenizer.convert_tokens_to_shape_ids(lowerCamelCase_ ) _UpperCamelCase = tokenizer.convert_tokens_to_pronunciation_ids(lowerCamelCase_ ) _UpperCamelCase = tokenizer.prepare_for_model( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) _UpperCamelCase = tokenizer.encode_plus(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
147
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union

import pandas as pd
import pyarrow as pa

import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal


logger = datasets.utils.logging.get_logger(__name__)

# pandas.read_csv kwargs that must be dropped when they equal the default
# (no usable default value), are deprecated, or only exist in newer pandas.
_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]


@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV.

    Fields mirror the keyword arguments of ``pandas.read_csv``; ``delimiter``
    and ``column_names`` are accepted as aliases of ``sep``/``names`` and are
    folded in by ``__post_init__``.
    """

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        # Fold the pandas-style aliases into the canonical fields.
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        """Keyword arguments to forward to ``pandas.read_csv``.

        Drops parameters equal to their ``CsvConfig`` default when they have no
        usable pandas default or are deprecated, and strips parameters that the
        installed pandas version does not know about yet.
        """
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs


class Csv(datasets.ArrowBasedBuilder):
    """Arrow-based builder that streams CSV files through pandas in chunks."""

    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            # A bare path or list of paths becomes a single TRAIN split.
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
147
1
def A_ ( __a : list , __a : int = 0 ): """simple docstring""" a__ = length or len(__a ) a__ = False for i in range(length - 1 ): if list_data[i] > list_data[i + 1]: a__ , a__ = list_data[i + 1], list_data[i] a__ = True return list_data if not swapped else bubble_sort(__a , length - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
351
from __future__ import annotations UpperCAmelCase = 8.988E9 # units = N * m^s * C^-2 def A_ ( __a : float , __a : float , __a : float , __a : float ): """simple docstring""" a__ = abs(chargea * chargea ) if (force, chargea, chargea, distance).count(0 ) != 1: raise ValueError("""One and only one argument must be 0""" ) if distance < 0: raise ValueError("""Distance cannot be negative""" ) if force == 0: a__ = COULOMBS_CONSTANT * charge_product / (distance**2) return {"force": force} elif chargea == 0: a__ = abs(__a ) * (distance**2) / (COULOMBS_CONSTANT * chargea) return {"charge1": chargea} elif chargea == 0: a__ = abs(__a ) * (distance**2) / (COULOMBS_CONSTANT * chargea) return {"charge2": chargea} elif distance == 0: a__ = (COULOMBS_CONSTANT * charge_product / abs(__a )) ** 0.5 return {"distance": distance} raise ValueError("""Exactly one argument must be 0""" ) if __name__ == "__main__": import doctest doctest.testmod()
351
1
# Build a tiny random FSMT en-ru checkpoint for tests: construct a toy BPE
# tokenizer in a temp dir, create a 1-layer/4-dim model, run one forward pass
# as a smoke test, and save both under `mname_tiny`.
from pathlib import Path
import json
import tempfile

from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES


mname_tiny = "tiny-wmt19-en-ru"

# Build
# borrowed from a test
vocab = [
    "l",
    "o",
    "w",
    "e",
    "r",
    "s",
    "t",
    "i",
    "d",
    "n",
    "w</w>",
    "r</w>",
    "t</w>",
    "lo",
    "low",
    "er</w>",
    "low</w>",
    "lowest</w>",
    "newer</w>",
    "wider</w>",
    "<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
    # Same toy vocab is reused for both source and target languages.
    with open(src_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, "w") as fp:
        fp.write("\n".join(merges))

    tokenizer = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )

# Minimal model: 1 encoder/decoder layer, 4-dim hidden, 1 attention head.
config = FSMTConfig(
    langs=["ru", "en"],
    src_vocab_size=1000,
    tgt_vocab_size=1000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

print("test output:", len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-ru
21
"""Fast (tiny-checkpoint, CPU) and nightly (GPU) tests for the ONNX Stable
Diffusion img2img pipeline across the supported schedulers."""
import random
import unittest

import numpy as np

from diffusers import (
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
    PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # Tiny random checkpoint so these tests run on CPU in seconds.
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        """Deterministic prompt/image/generator kwargs for the pipeline."""
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
649
0
"""SageMaker multi-node model-parallel integration test.

Only runs when TEST_SAGEMAKER=True (i.e. around a transformers release);
trains roberta-large with smdistributed model parallelism and checks
runtime/accuracy/loss KPIs.
"""
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized, parameterized_class

from . import is_sagemaker_available


if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue_model_parallelism.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
    ]
)
class MultiNodeModelParallelTest(unittest.TestCase):
    def setUp(self):
        # Stage the example training script into the SageMaker test workspace.
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        """Build a HuggingFace estimator configured for smdistributed model parallelism."""
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }
        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        """Export the job's CloudWatch metrics to a CSV in the test workspace."""
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
454
'''simple docstring''' from functools import lru_cache def _A ( _lowerCAmelCase ): """simple docstring""" __lowercase =2 __lowercase =set() while i * i <= n: if n % i: i += 1 else: n //= i factors.add(_lowerCAmelCase ) if n > 1: factors.add(_lowerCAmelCase ) return factors @lru_cache def _A ( _lowerCAmelCase ): """simple docstring""" return len(unique_prime_factors(_lowerCAmelCase ) ) def _A ( _lowerCAmelCase ): """simple docstring""" return len(set(_lowerCAmelCase ) ) in (0, 1) def _A ( _lowerCAmelCase ): """simple docstring""" __lowercase =2 while True: # Increment each value of a generated range __lowercase =[base + i for i in range(_lowerCAmelCase )] # Run elements through out unique_prime_factors function # Append our target number to the end. __lowercase =[upf_len(_lowerCAmelCase ) for x in group] checker.append(_lowerCAmelCase ) # If all numbers in the list are equal, return the group variable. if equality(_lowerCAmelCase ): return group # Increment our base variable by 1 base += 1 def _A ( _lowerCAmelCase = 4 ): """simple docstring""" __lowercase =run(_lowerCAmelCase ) return results[0] if len(_lowerCAmelCase ) else None if __name__ == "__main__": print(solution())
454
1
"""simple docstring""" import unittest from transformers import RoFormerTokenizer, RoFormerTokenizerFast from transformers.testing_utils import require_rjieba, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_rjieba @require_tokenizers class UpperCAmelCase ( __SCREAMING_SNAKE_CASE,unittest.TestCase ): A__ : List[Any] = RoFormerTokenizer A__ : Tuple = RoFormerTokenizerFast A__ : Union[str, Any] = True A__ : Optional[Any] = True def __UpperCAmelCase ( self : Union[str, Any] ): """simple docstring""" super().setUp() def __UpperCAmelCase ( self : Any , **__lowerCamelCase : Optional[Any] ): """simple docstring""" return self.tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''' , **__lowerCamelCase ) def __UpperCAmelCase ( self : Dict , **__lowerCamelCase : List[str] ): """simple docstring""" return self.rust_tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''' , **__lowerCamelCase ) def __UpperCAmelCase ( self : Any ): """simple docstring""" _snake_case = '''永和服装饰品有限公司,今天天气非常好''' _snake_case = '''永和 服装 饰品 有限公司 , 今 天 天 气 非常 好''' return input_text, output_text def __UpperCAmelCase ( self : List[str] ): """simple docstring""" _snake_case = self.get_tokenizer() _snake_case , _snake_case = self.get_chinese_input_output_texts() _snake_case = tokenizer.tokenize(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , output_text.split() ) _snake_case = tokens + [tokenizer.unk_token] _snake_case = [2_2_9_4_3, 2_1_3_3_2, 3_4_4_3_1, 4_5_9_0_4, 1_1_7, 3_0_6, 1_2_3_1, 1_2_3_1, 2_6_5_3, 3_3_9_9_4, 1_2_6_6, 1_0_0] self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase ) def __UpperCAmelCase ( self : Any ): """simple docstring""" _snake_case = self.get_rust_tokenizer() _snake_case , _snake_case = self.get_chinese_input_output_texts() _snake_case = tokenizer.tokenize(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , output_text.split() ) _snake_case = tokens + 
[tokenizer.unk_token] _snake_case = [2_2_9_4_3, 2_1_3_3_2, 3_4_4_3_1, 4_5_9_0_4, 1_1_7, 3_0_6, 1_2_3_1, 1_2_3_1, 2_6_5_3, 3_3_9_9_4, 1_2_6_6, 1_0_0] self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase ) def __UpperCAmelCase ( self : int ): """simple docstring""" pass def __UpperCAmelCase ( self : str ): """simple docstring""" pass def __UpperCAmelCase ( self : Any ): """simple docstring""" pass
103
'''simple docstring''' def __lowerCAmelCase ( a_ ) -> bool: '''simple docstring''' if p < 2: raise ValueError('p should not be less than 2!' ) elif p == 2: return True SCREAMING_SNAKE_CASE : Optional[int] = 4 SCREAMING_SNAKE_CASE : Optional[Any] = (1 << p) - 1 for _ in range(p - 2 ): SCREAMING_SNAKE_CASE : Optional[int] = ((s * s) - 2) % m return s == 0 if __name__ == "__main__": print(lucas_lehmer_test(7)) print(lucas_lehmer_test(11))
251
0
'''simple docstring''' from typing import Optional, Tuple, Union import torch from diffusers import DiffusionPipeline, ImagePipelineOutput class _a (_lowerCamelCase): """simple docstring""" def __init__( self , A__ , A__ ) -> Union[str, Any]: super().__init__() self.register_modules(unet=A__ , scheduler=A__ ) @torch.no_grad() def __call__( self , A__ = 1 , A__ = None , A__ = 50 , A__ = "pil" , A__ = True , **A__ , ) -> Union[ImagePipelineOutput, Tuple]: _SCREAMING_SNAKE_CASE = torch.randn( (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=A__ , ) _SCREAMING_SNAKE_CASE = image.to(self.device ) # set step values self.scheduler.set_timesteps(A__ ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output _SCREAMING_SNAKE_CASE = self.unet(A__ , A__ ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 _SCREAMING_SNAKE_CASE = self.scheduler.step(A__ , A__ , A__ ).prev_sample _SCREAMING_SNAKE_CASE = (image / 2 + 0.5).clamp(0 , 1 ) _SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": _SCREAMING_SNAKE_CASE = self.numpy_to_pil(A__ ) if not return_dict: return (image,), "This is a local test" return ImagePipelineOutput(images=A__ ), "This is a local test"
708
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available UpperCamelCase__ : int = {"tokenization_herbert": ["HerbertTokenizer"]} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Tuple = ["HerbertTokenizerFast"] if TYPE_CHECKING: from .tokenization_herbert import HerbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_herbert_fast import HerbertTokenizerFast else: import sys UpperCamelCase__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
0
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase_ = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = ["FNetTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = ["FNetTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ "FNET_PRETRAINED_MODEL_ARCHIVE_LIST", "FNetForMaskedLM", "FNetForMultipleChoice", "FNetForNextSentencePrediction", "FNetForPreTraining", "FNetForQuestionAnswering", "FNetForSequenceClassification", "FNetForTokenClassification", "FNetLayer", "FNetModel", "FNetPreTrainedModel", ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()["__file__"], 
_import_structure, module_spec=__spec__)
498
import unittest

from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin


# NOTE(review): these three module "constants" rebind one name, so only the
# last assignment survives import; they look like the obfuscated remains of a
# SentencePiece sample-vocab path, a BPE sample-vocab path, and a framework
# flag ("pt"/"tf") — confirm against the original test file.
SCREAMING_SNAKE_CASE__ = get_tests_dir("fixtures/test_sentencepiece.model")
SCREAMING_SNAKE_CASE__ = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
SCREAMING_SNAKE_CASE__ = "pt" if is_torch_available() else "tf"


@require_sentencepiece
@require_tokenizers
class snake_case(UpperCamelCase_, unittest.TestCase):
    """Tokenizer test-suite for CamemBERT (slow SentencePiece + fast Rust).

    NOTE(review): the base-class name ``UpperCamelCase_`` is undefined in this
    module — presumably the mangled ``TokenizerTesterMixin`` imported above.
    Every method below shares the name ``__lowercase`` (each definition
    shadows the previous one, and none are discovered as ``test_*`` methods),
    and bodies reference an undefined ``a_`` while binding results to
    throwaway names.  The code is kept verbatim and only documented here.
    """

    # NOTE(review): four class attributes rebinding a single name; apparently
    # tokenizer_class / rust_tokenizer_class / test_rust_tokenizer /
    # test_sentencepiece in the original suite.
    lowercase_ = CamembertTokenizer
    lowercase_ = CamembertTokenizerFast
    lowercase_ = True
    lowercase_ = True

    def __lowercase(self):
        """setUp: persist a slow tokenizer built from the SentencePiece fixture."""
        super().setUp()

        # We have a SentencePiece fixture for testing
        # NOTE(review): result bound to a throwaway name while ``tokenizer``
        # is read on the next line — verify against the upstream test.
        SCREAMING_SNAKE_CASE__ : Dict = CamembertTokenizer(a_)
        tokenizer.save_pretrained(self.tmpdirname)

    def __lowercase(self):
        """Check the <pad> token round-trips to id 1 in both directions."""
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = '<pad>'
        SCREAMING_SNAKE_CASE__ : int = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_), a_)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_), a_)

    def __lowercase(self):
        """Spot-check vocab contents: special tokens at both ends and the key count."""
        SCREAMING_SNAKE_CASE__ : Any = list(self.get_tokenizer().get_vocab().keys())
        # NOTE(review): ``vocab_keys`` is undefined — the list above was bound
        # to a throwaway name instead.
        self.assertEqual(vocab_keys[0], '<s>NOTUSED')
        self.assertEqual(vocab_keys[1], '<pad>')
        self.assertEqual(vocab_keys[-1], '<mask>')
        self.assertEqual(len(a_), 1004)

    def __lowercase(self):
        """The reported vocab_size includes added special tokens (1005)."""
        self.assertEqual(self.get_tokenizer().vocab_size, 1005)

    def __lowercase(self):
        """Slow and fast tokenizers saved/loaded from one directory must agree on ids."""
        SCREAMING_SNAKE_CASE__ : Optional[int] = CamembertTokenizer(a_)
        tokenizer.save_pretrained(self.tmpdirname)
        SCREAMING_SNAKE_CASE__ : int = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        SCREAMING_SNAKE_CASE__ : str = 'I was born in 92000, and this is falsé.'

        SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.encode(a_)
        SCREAMING_SNAKE_CASE__ : Optional[Any] = rust_tokenizer.encode(a_)
        self.assertListEqual(a_, a_)

        SCREAMING_SNAKE_CASE__ : str = tokenizer.encode(a_, add_special_tokens=a_)
        SCREAMING_SNAKE_CASE__ : List[str] = rust_tokenizer.encode(a_, add_special_tokens=a_)
        self.assertListEqual(a_, a_)

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.convert_ids_to_tokens(a_)
        SCREAMING_SNAKE_CASE__ : List[Any] = rust_tokenizer.tokenize(a_)
        self.assertListEqual(a_, a_)

    def __lowercase(self):
        """Tokenize/encode parity between the slow and fast tokenizers."""
        if not self.test_rust_tokenizer:
            return

        SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_tokenizer()
        SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_rust_tokenizer()

        SCREAMING_SNAKE_CASE__ : Tuple = 'I was born in 92000, and this is falsé.'

        SCREAMING_SNAKE_CASE__ : str = tokenizer.tokenize(a_)
        SCREAMING_SNAKE_CASE__ : List[Any] = rust_tokenizer.tokenize(a_)
        self.assertListEqual(a_, a_)

        SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.encode(a_, add_special_tokens=a_)
        SCREAMING_SNAKE_CASE__ : Optional[int] = rust_tokenizer.encode(a_, add_special_tokens=a_)
        self.assertListEqual(a_, a_)

        SCREAMING_SNAKE_CASE__ : int = self.get_rust_tokenizer()
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.encode(a_)
        SCREAMING_SNAKE_CASE__ : Tuple = rust_tokenizer.encode(a_)
        self.assertListEqual(a_, a_)

    @slow
    def __lowercase(self):
        """Integration check against encodings pinned to a camembert-base revision."""
        # fmt: off
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = {'input_ids': [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 2_7575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 2_2804, 1_8818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 1_0326, 24, 2267, 20, 416, 5072, 1_5612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # camembert is a french model. So we also use french texts.
        SCREAMING_SNAKE_CASE__ : str = [
            'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
            'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
            'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
            'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
            'telles que la traduction et la synthèse de texte.',
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=a_,
            model_name='camembert-base',
            revision='3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf',
            sequences=a_,
        )
85
0
"""Lazy import structure for Transformer-XL.

Covers configuration, tokenization and both the PyTorch and TensorFlow
modeling files; the backend-specific entries are only registered when the
corresponding framework is installed.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

# Maps submodule name -> names it exports.  Backend-specific entries are
# appended below.
_lowercase = {
    'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'],
    'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Fix: add the modeling entry to the import-structure dict; the previous
    # revision rebound the dict itself to a bare list.
    _lowercase['modeling_transfo_xl'] = [
        'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
        'AdaptiveEmbedding',
        'TransfoXLForSequenceClassification',
        'TransfoXLLMHeadModel',
        'TransfoXLModel',
        'TransfoXLPreTrainedModel',
        'load_tf_weights_in_transfo_xl',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowercase['modeling_tf_transfo_xl'] = [
        'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFAdaptiveEmbedding',
        'TFTransfoXLForSequenceClassification',
        'TFTransfoXLLMHeadModel',
        'TFTransfoXLMainLayer',
        'TFTransfoXLModel',
        'TFTransfoXLPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
    from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_transfo_xl import (
            TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            AdaptiveEmbedding,
            TransfoXLForSequenceClassification,
            TransfoXLLMHeadModel,
            TransfoXLModel,
            TransfoXLPreTrainedModel,
            load_tf_weights_in_transfo_xl,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_transfo_xl import (
            TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFAdaptiveEmbedding,
            TFTransfoXLForSequenceClassification,
            TFTransfoXLLMHeadModel,
            TFTransfoXLMainLayer,
            TFTransfoXLModel,
            TFTransfoXLPreTrainedModel,
        )
else:
    import sys

    # Fix: pass the structure built above (the old code referenced an undefined
    # ``_import_structure``) and install the lazy module in ``sys.modules``.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _lowercase, module_spec=__spec__)
242
"""Official evaluation script for SQuAD version 2.0.

Computes exact-match and F1 metrics for predictions against a SQuAD v2.0
dataset file.  If a ``na_prob.json`` file (question id -> predicted
probability the question is unanswerable) is supplied, also tunes a
no-answer threshold and can plot precision-recall curves.

Restored from a mangled revision in which every function shared one name
(so all internal calls raised NameError) and several signatures repeated a
parameter name (a SyntaxError); call sites dictated the names used here.
"""
import argparse
import collections
import json
import os
import re
import string
import sys

import numpy as np

# Leading/standalone English articles, stripped during answer normalization.
ARTICLES_REGEX = re.compile(r'\b(a|an|the)\b', re.UNICODE)

# Parsed CLI options; populated in the __main__ block before main() runs.
OPTS = None


def parse_args():
    """Parse CLI arguments; print help and exit when invoked with no args."""
    parser = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.')
    parser.add_argument('data_file', metavar='data.json', help='Input data JSON file.')
    parser.add_argument('pred_file', metavar='pred.json', help='Model predictions.')
    parser.add_argument(
        '--out-file', '-o', metavar='eval.json', help='Write accuracy metrics to file (default is stdout).'
    )
    parser.add_argument(
        '--na-prob-file', '-n', metavar='na_prob.json', help='Model estimates of probability of no answer.'
    )
    parser.add_argument(
        '--na-prob-thresh',
        '-t',
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        '--out-image-dir', '-p', metavar='out_images', default=None, help='Save precision-recall curves to directory.'
    )
    parser.add_argument('--verbose', '-v', action='store_true')
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()


def make_qid_to_has_ans(dataset):
    """Map each question id to True when it has at least one gold answer."""
    qid_to_has_ans = {}
    for article in dataset:
        for p in article['paragraphs']:
            for qa in p['qas']:
                qid_to_has_ans[qa['id']] = bool(qa['answers']['text'])
    return qid_to_has_ans


def normalize_answer(text):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(s):
        return ARTICLES_REGEX.sub(' ', s)

    def white_space_fix(s):
        return ' '.join(s.split())

    def remove_punc(s):
        exclude = set(string.punctuation)
        return ''.join(ch for ch in s if ch not in exclude)

    def lower(s):
        return s.lower()

    return white_space_fix(remove_articles(remove_punc(lower(text))))


def get_tokens(s):
    """Whitespace tokens of the normalized answer ([] for empty/None)."""
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    """1 if the normalized answers match exactly, else 0."""
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_fa(a_gold, a_pred):
    """Token-level F1 between a gold answer and a prediction."""
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    fa = (2 * precision * recall) / (precision + recall)
    return fa


def get_raw_scores(dataset, preds):
    """Per-question exact and F1 scores (max over all gold answers)."""
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article['paragraphs']:
            for qa in p['qas']:
                qid = qa['id']
                gold_answers = [t for t in qa['answers']['text'] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = ['']
                if qid not in preds:
                    print(f'Missing prediction for {qid}')
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                fa_scores[qid] = max(compute_fa(a, a_pred) for a in gold_answers)
    return exact_scores, fa_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    """Zero/one-out scores for questions the model predicts are unanswerable."""
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            # Predicted no-answer: correct iff the question truly has no answer.
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, fa_scores, qid_list=None):
    """Aggregate exact/F1 scores (optionally over a subset of question ids)."""
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ('exact', 100.0 * sum(exact_scores.values()) / total),
                ('f1', 100.0 * sum(fa_scores.values()) / total),
                ('total', total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ('exact', 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ('f1', 100.0 * sum(fa_scores[k] for k in qid_list) / total),
                ('total', total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    """Copy every entry of new_eval into main_eval under '<prefix>_<key>'."""
    for k in new_eval:
        main_eval[f'{prefix}_{k}'] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    """Save a single precision-recall curve (requires matplotlib, see __main__)."""
    plt.step(recalls, precisions, color='b', alpha=0.2, where='post')
    plt.fill_between(recalls, precisions, step='post', alpha=0.2, color='b')
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    """Average precision when sweeping the no-answer probability threshold."""
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {'ap': 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, out_image_dir):
    """Compute PR curves for exact, F1 and an oracle task; merge into main_eval."""
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, 'pr_exact.png'),
        title='Precision-Recall curve for Exact Match score',
    )
    pr_fa = make_precision_recall_eval(
        fa_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, 'pr_f1.png'),
        title='Precision-Recall curve for F1 score',
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, 'pr_oracle.png'),
        title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)',
    )
    merge_eval(main_eval, pr_exact, 'pr_exact')
    merge_eval(main_eval, pr_fa, 'pr_f1')
    merge_eval(main_eval, pr_oracle, 'pr_oracle')


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    """Save a histogram of no-answer probabilities for the given questions."""
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel('Model probability of no-answer')
    plt.ylabel('Proportion of dataset')
    plt.title(f'Histogram of no-answer probability: {name}')
    plt.savefig(os.path.join(image_dir, f'na_prob_hist_{name}.png'))
    plt.clf()


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    """Best achievable score and the no-answer threshold that attains it."""
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans):
    """Record the best exact/F1 scores and thresholds in main_eval."""
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_fa, fa_thresh = find_best_thresh(preds, fa_raw, na_probs, qid_to_has_ans)
    main_eval['best_exact'] = best_exact
    main_eval['best_exact_thresh'] = exact_thresh
    main_eval['best_f1'] = best_fa
    main_eval['best_f1_thresh'] = fa_thresh


def main():
    """Load data/predictions from OPTS, evaluate and emit metrics JSON."""
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
    dataset = dataset_json['data']
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, fa_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    fa_thresh = apply_no_ans_threshold(fa_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, fa_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, 'HasAns')
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, 'NoAns')
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, 'hasAns')
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, 'noAns')
    if OPTS.out_file:
        with open(OPTS.out_file, 'w') as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == '__main__':
    OPTS = parse_args()
    if OPTS.out_image_dir:
        # matplotlib is only needed (and imported) when plots are requested;
        # the plotting helpers rely on this module-global `plt`.
        import matplotlib

        matplotlib.use('Agg')
        import matplotlib.pyplot as plt
    main()
242
1
from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) __A : List[Any] = _symbol_database.Default() __A : Union[str, Any] = _descriptor_pool.Default().AddSerializedFile( B"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 
\x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 
\x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03" ) __A : List[Any] = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals) if _descriptor._USE_C_DESCRIPTORS is False: __A : int = None __A : List[str] = B"H\003" # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" __A : int = 45 __A : int = 1_581 __A : List[Any] = 1_517 __A : Optional[Any] = 1_570 __A : List[str] = 1_584 __A : Dict = 1_793 __A : str = 1_795 __A : Tuple = 1_916 __A : str = 1_864 __A : Union[str, Any] = 1_905 __A : int = 1_919 __A : List[Any] = 2_429 __A : Any = 2_208 __A : Tuple = 2_418 __A : Union[str, Any] = 2_323 __A : Union[str, Any] = 2_407 # @@protoc_insertion_point(module_scope)
27
"""simple docstring""" import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging __magic_name__ = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE_ ( __a ): """simple docstring""" def __init__( self , lowerCAmelCase__): super().__init__() __SCREAMING_SNAKE_CASE = nn.ModuleList(lowerCAmelCase__) def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = True , ): for i, (image, scale, controlnet) in enumerate(zip(lowerCAmelCase__ , lowerCAmelCase__ , self.nets)): __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = controlnet( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) # merge samples if i == 0: __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = down_samples, mid_sample else: __SCREAMING_SNAKE_CASE = [ samples_prev + samples_curr for samples_prev, samples_curr in zip(lowerCAmelCase__ , lowerCAmelCase__) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = None , ): __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = save_directory for controlnet in self.nets: controlnet.save_pretrained( lowerCAmelCase__ , is_main_process=lowerCAmelCase__ , save_function=lowerCAmelCase__ , safe_serialization=lowerCAmelCase__ , variant=lowerCAmelCase__ , ) idx += 1 __SCREAMING_SNAKE_CASE = model_path_to_save + f"_{idx}" @classmethod def 
snake_case_ ( cls , lowerCAmelCase__ , **lowerCAmelCase__): __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = [] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... __SCREAMING_SNAKE_CASE = pretrained_model_path while os.path.isdir(lowerCAmelCase__): __SCREAMING_SNAKE_CASE = ControlNetModel.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__) controlnets.append(lowerCAmelCase__) idx += 1 __SCREAMING_SNAKE_CASE = pretrained_model_path + f"_{idx}" logger.info(f"{len(lowerCAmelCase__)} controlnets loaded from {pretrained_model_path}.") if len(lowerCAmelCase__) == 0: raise ValueError( f"No ControlNets found under {os.path.dirname(lowerCAmelCase__)}. Expected at least {pretrained_model_path + '_0'}.") return cls(lowerCAmelCase__)
155
0
"""simple docstring""" from __future__ import annotations class SCREAMING_SNAKE_CASE__ : def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : int=None ): lowerCamelCase__ = data lowerCamelCase__ = None def __repr__( self : List[Any] ): lowerCamelCase__ = [] lowerCamelCase__ = self while temp: string_rep.append(f"""{temp.data}""" ) lowerCamelCase__ = temp.next return "->".join(SCREAMING_SNAKE_CASE_ ) def _A ( __lowercase ): """simple docstring""" if not elements_list: raise Exception("""The Elements List is empty""" ) lowerCamelCase__ = lowerCamelCase__ = Node(elements_list[0] ) for i in range(1 , len(__lowercase ) ): lowerCamelCase__ = Node(elements_list[i] ) lowerCamelCase__ = current.next return head def _A ( __lowercase ): """simple docstring""" if head_node is not None and isinstance(__lowercase , __lowercase ): print_reverse(head_node.next ) print(head_node.data ) def _A ( ): """simple docstring""" from doctest import testmod testmod() lowerCamelCase__ = make_linked_list([14, 52, 14, 12, 43] ) print("""Linked List:""" ) print(__lowercase ) print("""Elements in Reverse:""" ) print_reverse(__lowercase ) if __name__ == "__main__": main()
258
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) __magic_name__ = { """configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = [ """GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""", """GPTBigCodeForSequenceClassification""", """GPTBigCodeForTokenClassification""", """GPTBigCodeForCausalLM""", """GPTBigCodeModel""", """GPTBigCodePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_bigcode import ( GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTBigCodeForCausalLM, GPTBigCodeForSequenceClassification, GPTBigCodeForTokenClassification, GPTBigCodeModel, GPTBigCodePreTrainedModel, ) else: import sys __magic_name__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
258
1