Dataset schema (column, type, observed range):

column                    type            range
code                      string          lengths 82 – 54.1k
code_codestyle            int64           0 – 699
style_context             string          lengths 111 – 35.6k
style_context_codestyle   int64           0 – 699
label                     int64           0 – 1
import os import time import pytest from datasets.utils.filelock import FileLock, Timeout def a__ ( A__ ): SCREAMING_SNAKE_CASE_ : Union[str, Any] = FileLock(str(tmpdir / 'foo.lock' ) ) SCREAMING_SNAKE_CASE_ : List[str] = FileLock(str(tmpdir / 'foo.lock' ) ) SCREAMING_SNAKE_CASE_ : Tuple = 0.01 with locka.acquire(): with pytest.raises(A__ ): SCREAMING_SNAKE_CASE_ : Union[str, Any] = time.time() locka.acquire(A__ ) assert time.time() - _start > timeout def a__ ( A__ ): SCREAMING_SNAKE_CASE_ : List[Any] = 'a' * 1_0_0_0 + '.lock' SCREAMING_SNAKE_CASE_ : int = FileLock(str(tmpdir / filename ) ) assert locka._lock_file.endswith('.lock' ) assert not locka._lock_file.endswith(A__ ) assert len(os.path.basename(locka._lock_file ) ) <= 2_5_5 SCREAMING_SNAKE_CASE_ : Optional[int] = FileLock(tmpdir / filename ) with locka.acquire(): with pytest.raises(A__ ): locka.acquire(0 )
101
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase_ = {"""configuration_mbart""": ["""MBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MBartConfig""", """MBartOnnxConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ["""MBartTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ["""MBartTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ """MBART_PRETRAINED_MODEL_ARCHIVE_LIST""", """MBartForCausalLM""", """MBartForConditionalGeneration""", """MBartForQuestionAnswering""", """MBartForSequenceClassification""", """MBartModel""", """MBartPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ """TFMBartForConditionalGeneration""", """TFMBartModel""", """TFMBartPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ """FlaxMBartForConditionalGeneration""", """FlaxMBartForQuestionAnswering""", """FlaxMBartForSequenceClassification""", """FlaxMBartModel""", """FlaxMBartPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart import MBartTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart_fast import MBartTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mbart import ( MBART_PRETRAINED_MODEL_ARCHIVE_LIST, MBartForCausalLM, MBartForConditionalGeneration, MBartForQuestionAnswering, MBartForSequenceClassification, MBartModel, MBartPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mbart import ( FlaxMBartForConditionalGeneration, FlaxMBartForQuestionAnswering, FlaxMBartForSequenceClassification, FlaxMBartModel, FlaxMBartPreTrainedModel, ) else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
92
0
"""simple docstring""" __magic_name__ : str = { """A""": """.-""", """B""": """-...""", """C""": """-.-.""", """D""": """-..""", """E""": """.""", """F""": """..-.""", """G""": """--.""", """H""": """....""", """I""": """..""", """J""": """.---""", """K""": """-.-""", """L""": """.-..""", """M""": """--""", """N""": """-.""", """O""": """---""", """P""": """.--.""", """Q""": """--.-""", """R""": """.-.""", """S""": """...""", """T""": """-""", """U""": """..-""", """V""": """...-""", """W""": """.--""", """X""": """-..-""", """Y""": """-.--""", """Z""": """--..""", """1""": """.----""", """2""": """..---""", """3""": """...--""", """4""": """....-""", """5""": """.....""", """6""": """-....""", """7""": """--...""", """8""": """---..""", """9""": """----.""", """0""": """-----""", """&""": """.-...""", """@""": """.--.-.""", """:""": """---...""", """,""": """--..--""", """.""": """.-.-.-""", """'""": """.----.""", """\"""": """.-..-.""", """?""": """..--..""", """/""": """-..-.""", """=""": """-...-""", """+""": """.-.-.""", """-""": """-....-""", """(""": """-.--.""", """)""": """-.--.-""", """!""": """-.-.--""", """ """: """/""" } # Exclamation mark is not in ITU-R recommendation # fmt: on __magic_name__ : int = {value: key for key, value in MORSE_CODE_DICT.items()} def UpperCamelCase (SCREAMING_SNAKE_CASE ): return " ".join(MORSE_CODE_DICT[char] for char in message.upper() ) def UpperCamelCase (SCREAMING_SNAKE_CASE ): return "".join(REVERSE_DICT[char] for char in message.split() ) def UpperCamelCase (): UpperCamelCase : Any = """Morse code here!""" print(SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = encrypt(SCREAMING_SNAKE_CASE ) print(SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[Any] = decrypt(SCREAMING_SNAKE_CASE ) print(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
102
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import _LazyModule UpperCamelCase_ = {"""tokenization_byt5""": ["""ByT5Tokenizer"""]} if TYPE_CHECKING: from .tokenization_byta import ByTaTokenizer else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
92
0
"""simple docstring""" from __future__ import annotations import os from typing import Any import requests snake_case = '''https://api.github.com''' # https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user snake_case = BASE_URL + '''/user''' # https://github.com/settings/tokens snake_case = os.environ.get('''USER_TOKEN''', '''''') def snake_case ( lowerCAmelCase_ ) -> dict[Any, Any]: _snake_case = { '''Authorization''': f"""token {auth_token}""", '''Accept''': '''application/vnd.github.v3+json''', } return requests.get(lowerCAmelCase_ , headers=lowerCAmelCase_ ).json() if __name__ == "__main__": # pragma: no cover if USER_TOKEN: for key, value in fetch_github_info(USER_TOKEN).items(): print(F"{key}: {value}") else: raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
103
'''simple docstring''' import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process UpperCamelCase_ = logging.getLogger(__name__) def _lowerCAmelCase ( __magic_name__ : List[Any] , __magic_name__ : str ) -> Union[str, Any]: return (preds == labels).mean() @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) lowerCamelCase_ = field( default=lowercase__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) lowerCamelCase_ = field( default=lowercase__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) lowerCamelCase_ = field( default=lowercase__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys() )} ) lowerCamelCase_ = field(metadata={'help': 'Should contain the data files for the task.'} ) lowerCamelCase_ = field( default=1_28 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) lowerCamelCase_ = field( default=lowercase__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) def _lowerCAmelCase ( ) -> int: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. lowercase : Dict =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) lowercase , lowercase , lowercase : List[Any] =parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. 
Use''' ''' --overwrite_output_dir to overcome.''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('''Training/evaluation parameters %s''' , __magic_name__ ) # Set seed set_seed(training_args.seed ) try: lowercase : Any =processors[data_args.task_name]() lowercase : Optional[int] =processor.get_labels() lowercase : str =len(__magic_name__ ) except KeyError: raise ValueError('''Task not found: %s''' % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. lowercase : List[str] =AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__magic_name__ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) lowercase : int =AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) lowercase : Any =AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__magic_name__ , cache_dir=model_args.cache_dir , ) # Get datasets lowercase : int =( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=__magic_name__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) lowercase : Union[str, Any] =( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=__magic_name__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(__magic_name__ : EvalPrediction ) -> Dict: lowercase : Dict =np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(__magic_name__ , p.label_ids )} # Data collator lowercase : List[str] =DataCollatorWithPadding(__magic_name__ , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer lowercase : Dict =Trainer( model=__magic_name__ , args=__magic_name__ , train_dataset=__magic_name__ , eval_dataset=__magic_name__ , compute_metrics=__magic_name__ , data_collator=__magic_name__ , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation lowercase : Optional[Any] ={} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) 
lowercase : List[Any] =trainer.evaluate() lowercase : Any =os.path.join(training_args.output_dir , '''eval_results.txt''' ) if trainer.is_world_master(): with open(__magic_name__ , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key, value in result.items(): logger.info(''' %s = %s''' , __magic_name__ , __magic_name__ ) writer.write('''%s = %s\n''' % (key, value) ) results.update(__magic_name__ ) return results def _lowerCAmelCase ( __magic_name__ : Any ) -> Optional[Any]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
92
0
"""simple docstring""" import argparse import os import re import tensorflow as tf import torch from transformers import BertConfig, BertModel from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase = logging.get_logger(__name__) def _lowerCamelCase ( UpperCAmelCase_ : str, UpperCAmelCase_ : Dict, UpperCAmelCase_ : str ) -> List[str]: """simple docstring""" A__ = os.path.abspath(UpperCAmelCase_ ) logger.info(F"""Converting TensorFlow checkpoint from {tf_path}""" ) # Load weights from TF model A__ = tf.train.list_variables(UpperCAmelCase_ ) A__ = [] A__ = [] A__ = [] for full_name, shape in init_vars: # logger.info(f"Loading TF weight {name} with shape {shape}") A__ = full_name.split("/" ) if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]: logger.info(F"""Skipping non-model layer {full_name}""" ) continue if "optimizer" in full_name: logger.info(F"""Skipping optimization layer {full_name}""" ) continue if name[0] == "model": # ignore initial 'model' A__ = name[1:] # figure out how many levels deep the name is A__ = 0 for _name in name: if _name.startswith("layer_with_weights" ): depth += 1 else: break layer_depth.append(UpperCAmelCase_ ) # read data A__ = tf.train.load_variable(UpperCAmelCase_, UpperCAmelCase_ ) names.append("/".join(UpperCAmelCase_ ) ) arrays.append(UpperCAmelCase_ ) logger.info(F"""Read a total of {len(UpperCAmelCase_ ):,} layers""" ) # Sanity check if len(set(UpperCAmelCase_ ) ) != 1: raise ValueError(F"""Found layer names with different depths (layer depth {list(set(UpperCAmelCase_ ) )})""" ) A__ = list(set(UpperCAmelCase_ ) )[0] if layer_depth != 1: raise ValueError( "The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP" " heads." ) # convert layers logger.info("Converting weights..." 
) for full_name, array in zip(UpperCAmelCase_, UpperCAmelCase_ ): A__ = full_name.split("/" ) A__ = model A__ = [] for i, m_name in enumerate(UpperCAmelCase_ ): if m_name == ".ATTRIBUTES": # variable names end with .ATTRIBUTES/VARIABLE_VALUE break if m_name.startswith("layer_with_weights" ): A__ = int(m_name.split("-" )[-1] ) if layer_num <= 2: # embedding layers # layer_num 0: word_embeddings # layer_num 1: position_embeddings # layer_num 2: token_type_embeddings continue elif layer_num == 3: # embedding LayerNorm trace.extend(["embeddings", "LayerNorm"] ) A__ = getattr(UpperCAmelCase_, "embeddings" ) A__ = getattr(UpperCAmelCase_, "LayerNorm" ) elif layer_num > 3 and layer_num < config.num_hidden_layers + 4: # encoder layers trace.extend(["encoder", "layer", str(layer_num - 4 )] ) A__ = getattr(UpperCAmelCase_, "encoder" ) A__ = getattr(UpperCAmelCase_, "layer" ) A__ = pointer[layer_num - 4] elif layer_num == config.num_hidden_layers + 4: # pooler layer trace.extend(["pooler", "dense"] ) A__ = getattr(UpperCAmelCase_, "pooler" ) A__ = getattr(UpperCAmelCase_, "dense" ) elif m_name == "embeddings": trace.append("embeddings" ) A__ = getattr(UpperCAmelCase_, "embeddings" ) if layer_num == 0: trace.append("word_embeddings" ) A__ = getattr(UpperCAmelCase_, "word_embeddings" ) elif layer_num == 1: trace.append("position_embeddings" ) A__ = getattr(UpperCAmelCase_, "position_embeddings" ) elif layer_num == 2: trace.append("token_type_embeddings" ) A__ = getattr(UpperCAmelCase_, "token_type_embeddings" ) else: raise ValueError(F"""Unknown embedding layer with name {full_name}""" ) trace.append("weight" ) A__ = getattr(UpperCAmelCase_, "weight" ) elif m_name == "_attention_layer": # self-attention layer trace.extend(["attention", "self"] ) A__ = getattr(UpperCAmelCase_, "attention" ) A__ = getattr(UpperCAmelCase_, "self" ) elif m_name == "_attention_layer_norm": # output attention norm trace.extend(["attention", "output", "LayerNorm"] ) A__ = getattr(UpperCAmelCase_, "attention" ) A__ = getattr(UpperCAmelCase_, "output" ) A__ = getattr(UpperCAmelCase_, "LayerNorm" ) elif m_name == "_attention_output_dense": # output attention dense trace.extend(["attention", "output", "dense"] ) A__ = getattr(UpperCAmelCase_, "attention" ) A__ = getattr(UpperCAmelCase_, "output" ) A__ = getattr(UpperCAmelCase_, "dense" ) elif m_name == "_output_dense": # output dense trace.extend(["output", "dense"] ) A__ = getattr(UpperCAmelCase_, "output" ) A__ = getattr(UpperCAmelCase_, "dense" ) elif m_name == "_output_layer_norm": # output dense trace.extend(["output", "LayerNorm"] ) A__ = getattr(UpperCAmelCase_, "output" ) A__ = getattr(UpperCAmelCase_, "LayerNorm" ) elif m_name == "_key_dense": # attention key trace.append("key" ) A__ = getattr(UpperCAmelCase_, "key" ) elif m_name == "_query_dense": # attention query trace.append("query" ) A__ = getattr(UpperCAmelCase_, "query" ) elif m_name == "_value_dense": # attention value trace.append("value" ) A__ = getattr(UpperCAmelCase_, "value" ) elif m_name == "_intermediate_dense": # attention intermediate dense trace.extend(["intermediate", "dense"] ) A__ = getattr(UpperCAmelCase_, "intermediate" ) A__ = getattr(UpperCAmelCase_, "dense" ) elif m_name == "_output_layer_norm": # output layer norm trace.append("output" ) A__ = getattr(UpperCAmelCase_, "output" ) # weights & biases elif m_name in ["bias", "beta"]: trace.append("bias" ) A__ = getattr(UpperCAmelCase_, "bias" ) elif m_name in ["kernel", "gamma"]: trace.append("weight" ) A__ = getattr(UpperCAmelCase_, "weight" ) 
else: logger.warning(F"""Ignored {m_name}""" ) # for certain layers reshape is necessary A__ = ".".join(UpperCAmelCase_ ) if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)", UpperCAmelCase_ ) or re.match( r"(\S+)\.attention\.output\.dense\.weight", UpperCAmelCase_ ): A__ = array.reshape(pointer.data.shape ) if "kernel" in full_name: A__ = array.transpose() if pointer.shape == array.shape: A__ = torch.from_numpy(UpperCAmelCase_ ) else: raise ValueError( F"""Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:""" F""" {array.shape}""" ) logger.info(F"""Successfully set variable {full_name} to PyTorch layer {trace}""" ) return model def _lowerCamelCase ( UpperCAmelCase_ : Any, UpperCAmelCase_ : Union[str, Any], UpperCAmelCase_ : Any ) -> List[str]: """simple docstring""" logger.info(F"""Loading model based on config from {config_path}...""" ) A__ = BertConfig.from_json_file(UpperCAmelCase_ ) A__ = BertModel(UpperCAmelCase_ ) # Load weights from checkpoint logger.info(F"""Loading weights from checkpoint {tf_checkpoint_path}...""" ) load_tfa_weights_in_bert(UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ ) # Save pytorch-model logger.info(F"""Saving PyTorch model to {pytorch_dump_path}...""" ) torch.save(model.state_dict(), UpperCAmelCase_ ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() parser.add_argument( """--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow 2.x checkpoint path.""" ) parser.add_argument( """--bert_config_file""", type=str, required=True, help="""The config json file corresponding to the BERT model. This specifies the model architecture.""", ) parser.add_argument( """--pytorch_dump_path""", type=str, required=True, help="""Path to the output PyTorch model (must include filename).""", ) UpperCamelCase = parser.parse_args() convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
104
'''simple docstring''' import logging import os from dataclasses import dataclass, field from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import List, Optional import faiss import torch from datasets import Features, Sequence, Value, load_dataset from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser UpperCamelCase_ = logging.getLogger(__name__) torch.set_grad_enabled(False) UpperCamelCase_ = """cuda""" if torch.cuda.is_available() else """cpu""" def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : Tuple=100 , __magic_name__ : Optional[int]=" " ) -> List[str]: lowercase : List[Any] =text.split(__magic_name__ ) return [character.join(text[i : i + n] ).strip() for i in range(0 , len(__magic_name__ ) , __magic_name__ )] def _lowerCAmelCase ( __magic_name__ : dict ) -> dict: lowercase , lowercase : int =[], [] for title, text in zip(documents['''title'''] , documents['''text'''] ): if text is not None: for passage in split_text(__magic_name__ ): titles.append(title if title is not None else '''''' ) texts.append(__magic_name__ ) return {"title": titles, "text": texts} def _lowerCAmelCase ( __magic_name__ : dict , __magic_name__ : DPRContextEncoder , __magic_name__ : DPRContextEncoderTokenizerFast ) -> dict: lowercase : Dict =ctx_tokenizer( documents['''title'''] , documents['''text'''] , truncation=__magic_name__ , padding='''longest''' , return_tensors='''pt''' )['''input_ids'''] lowercase : Optional[int] =ctx_encoder(input_ids.to(device=__magic_name__ ) , return_dict=__magic_name__ ).pooler_output return {"embeddings": embeddings.detach().cpu().numpy()} def _lowerCAmelCase ( __magic_name__ : "RagExampleArguments" , __magic_name__ : "ProcessingArguments" , __magic_name__ : "IndexHnswArguments" , ) -> str: ###################################### logger.info('''Step 1 - Create the dataset''' ) ###################################### # The dataset needed for RAG must have three columns: # - title (string): title of the document # - text (string): text of a passage of the document # - embeddings (array of dimension d): DPR representation of the passage # Let's say you have documents in tab-separated csv files with columns "title" and "text" assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file" # You can load a Dataset object this way lowercase : Tuple =load_dataset( '''csv''' , data_files=[rag_example_args.csv_path] , split='''train''' , delimiter='''\t''' , column_names=['''title''', '''text'''] ) # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files # Then split the documents into passages of 100 words lowercase : Optional[int] =dataset.map(__magic_name__ , batched=__magic_name__ , num_proc=processing_args.num_proc ) # And compute the embeddings lowercase : Any =DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=__magic_name__ ) lowercase : Any =DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ) lowercase : Optional[int] =Features( {'''text''': Value('''string''' ), '''title''': Value('''string''' ), '''embeddings''': Sequence(Value('''float32''' ) )} ) # optional, save as float32 instead of float64 to save space lowercase : Optional[Any] =dataset.map( partial(__magic_name__ , ctx_encoder=__magic_name__ , ctx_tokenizer=__magic_name__ ) , batched=__magic_name__ , 
batch_size=processing_args.batch_size , features=__magic_name__ , ) # And finally save your dataset lowercase : Optional[Any] =os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset''' ) dataset.save_to_disk(__magic_name__ ) # from datasets import load_from_disk # dataset = load_from_disk(passages_path) # to reload the dataset ###################################### logger.info('''Step 2 - Index the dataset''' ) ###################################### # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search lowercase : Union[str, Any] =faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT ) dataset.add_faiss_index('''embeddings''' , custom_index=__magic_name__ ) # And save the index lowercase : Dict =os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset_hnsw_index.faiss''' ) dataset.get_index('''embeddings''' ).save(__magic_name__ ) # dataset.load_faiss_index("embeddings", index_path) # to reload the index @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = field( default=str(Path(lowercase__ ).parent / 'test_run' / 'dummy-kb' / 'my_knowledge_dataset.csv' ) , metadata={'help': 'Path to a tab-separated csv file with columns \'title\' and \'text\''} , ) lowerCamelCase_ = field( default=lowercase__ , metadata={'help': 'Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'} , ) lowerCamelCase_ = field( default='facebook/rag-sequence-nq' , metadata={'help': 'The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''} , ) lowerCamelCase_ = field( default='facebook/dpr-ctx_encoder-multiset-base' , metadata={ 'help': ( 'The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or' ' \'facebook/dpr-ctx_encoder-multiset-base\'' ) } , ) lowerCamelCase_ = field( default=str(Path(lowercase__ ).parent / 'test_run' / 'dummy-kb' ) , metadata={'help': 'Path to a directory where the dataset passages and the index will be saved'} , ) @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = field( default=lowercase__ , metadata={ 'help': 'The number of processes to use to split the documents into passages. Default is single process.' } , ) lowerCamelCase_ = field( default=16 , metadata={ 'help': 'The batch size to use when computing the passages embeddings using the DPR context encoder.' } , ) @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = field( default=7_68 , metadata={'help': 'The dimension of the embeddings to pass to the HNSW Faiss index.'} , ) lowerCamelCase_ = field( default=1_28 , metadata={ 'help': ( 'The number of bi-directional links created for every new element during the HNSW index construction.' ) } , ) if __name__ == "__main__": logging.basicConfig(level=logging.WARNING) logger.setLevel(logging.INFO) UpperCamelCase_ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments)) UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = parser.parse_args_into_dataclasses() with TemporaryDirectory() as tmp_dir: UpperCamelCase_ = rag_example_args.output_dir or tmp_dir main(rag_example_args, processing_args, index_hnsw_args)
92
0
import argparse import torch from transformers import ( UniSpeechSatConfig, UniSpeechSatForAudioFrameClassification, UniSpeechSatForSequenceClassification, UniSpeechSatForXVector, WavaVecaFeatureExtractor, logging, ) logging.set_verbosity_info() UpperCamelCase__ : Union[str, Any] = logging.get_logger(__name__) def __UpperCAmelCase ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE_ : str = UniSpeechSatForSequenceClassification.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_ ) SCREAMING_SNAKE_CASE_ : Optional[int] = downstream_dict['projector.weight'] SCREAMING_SNAKE_CASE_ : Tuple = downstream_dict['projector.bias'] SCREAMING_SNAKE_CASE_ : List[str] = downstream_dict['model.post_net.linear.weight'] SCREAMING_SNAKE_CASE_ : Dict = downstream_dict['model.post_net.linear.bias'] return model def __UpperCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[int] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = UniSpeechSatForAudioFrameClassification.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_ ) SCREAMING_SNAKE_CASE_ : Optional[int] = downstream_dict['model.linear.weight'] SCREAMING_SNAKE_CASE_ : str = downstream_dict['model.linear.bias'] return model def __UpperCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Any] ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = UniSpeechSatForXVector.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_ ) SCREAMING_SNAKE_CASE_ : Tuple = downstream_dict['connector.weight'] SCREAMING_SNAKE_CASE_ : Any = downstream_dict['connector.bias'] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): SCREAMING_SNAKE_CASE_ : Any = downstream_dict[ F'model.framelevel_feature_extractor.module.{i}.kernel.weight' ] SCREAMING_SNAKE_CASE_ : Dict = downstream_dict[F'model.framelevel_feature_extractor.module.{i}.kernel.bias'] SCREAMING_SNAKE_CASE_ : Dict = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight'] SCREAMING_SNAKE_CASE_ : str = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias'] SCREAMING_SNAKE_CASE_ : Dict = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight'] SCREAMING_SNAKE_CASE_ : List[Any] = downstream_dict['model.utterancelevel_feature_extractor.linear2.bias'] SCREAMING_SNAKE_CASE_ : Optional[Any] = downstream_dict['objective.W'] return model @torch.no_grad() def __UpperCAmelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : int , lowerCamelCase_ : List[str] , lowerCamelCase_ : Any ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.load(lowerCamelCase_ , map_location='cpu' ) SCREAMING_SNAKE_CASE_ : Tuple = checkpoint['Downstream'] SCREAMING_SNAKE_CASE_ : List[Any] = UniSpeechSatConfig.from_pretrained(lowerCamelCase_ ) SCREAMING_SNAKE_CASE_ : Optional[int] = WavaVecaFeatureExtractor.from_pretrained( lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , do_normalize=lowerCamelCase_ ) SCREAMING_SNAKE_CASE_ : int = hf_config.architectures[0] if arch.endswith('ForSequenceClassification' ): SCREAMING_SNAKE_CASE_ : Optional[Any] = convert_classification(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) elif arch.endswith('ForAudioFrameClassification' ): SCREAMING_SNAKE_CASE_ : Optional[Any] = convert_diarization(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) elif arch.endswith('ForXVector' ): 
SCREAMING_SNAKE_CASE_ : Any = convert_xvector(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) else: raise NotImplementedError(F'S3PRL weights conversion is not supported for {arch}' ) if hf_config.use_weighted_layer_sum: SCREAMING_SNAKE_CASE_ : str = checkpoint['Featurizer']['weights'] hf_feature_extractor.save_pretrained(lowerCamelCase_ ) hf_model.save_pretrained(lowerCamelCase_ ) if __name__ == "__main__": UpperCamelCase__ : Optional[int] = argparse.ArgumentParser() parser.add_argument( '''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.''' ) parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''') parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''') UpperCamelCase__ : List[str] = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
105
'''simple docstring''' import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import MaMaaaTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from transformers.utils import is_sentencepiece_available if is_sentencepiece_available(): from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin if is_sentencepiece_available(): UpperCamelCase_ = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right UpperCamelCase_ = 128022 UpperCamelCase_ = 128028 @require_sentencepiece class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ): lowerCamelCase_ = MaMaaaTokenizer lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = True def lowerCamelCase_ ( self : Dict ): '''simple docstring''' super().setUp() lowercase : Dict =['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>'''] lowercase : List[Any] =dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) ) lowercase : List[Any] =Path(self.tmpdirname ) save_json(UpperCAmelCase__ , save_dir / VOCAB_FILES_NAMES['''vocab_file'''] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(UpperCAmelCase__ , save_dir / VOCAB_FILES_NAMES['''spm_file'''] ) lowercase : Tuple =MaMaaaTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase_ ( self : Any , **UpperCAmelCase__ : int ): '''simple docstring''' return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Dict ): '''simple docstring''' return ( "This is a test", "This is a test", ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : Tuple ='''</s>''' lowercase : Union[str, Any] =0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' lowercase : List[Any] =self.get_tokenizer() lowercase : Optional[Any] =list(tokenizer.get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''</s>''' ) self.assertEqual(vocab_keys[1] , '''<unk>''' ) self.assertEqual(vocab_keys[-1] , '''<s>''' ) self.assertEqual(len(UpperCAmelCase__ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) ) @unittest.skip('''Skip this test while all models are still to be uploaded.''' ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' pass def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : Union[str, Any] =self.get_tokenizer() lowercase : str =tokenizer.tokenize('''This is a test''' ) self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [2, 3, 4, 5, 6] , ) lowercase : Optional[int] =tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] ) self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) lowercase : Tuple =tokenizer.convert_tokens_to_string(UpperCAmelCase__ ) self.assertEqual(UpperCAmelCase__ , '''This is a test''' ) @slow def lowerCamelCase_ ( self 
: List[str] ): '''simple docstring''' # fmt: off lowercase : int ={'''input_ids''': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase__ , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , ) @require_torch @require_sentencepiece @require_tokenizers class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): lowerCamelCase_ = 'facebook/m2m100_418M' lowerCamelCase_ = [ 'In my opinion, there are two levels of response from the French government.', 'NSA Affair Emphasizes Complete Lack of Debate on Intelligence', ] lowerCamelCase_ = [ 'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.', 'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement', ] # fmt: off lowerCamelCase_ = [EN_CODE, 5_93, 19_49, 11_57_81, 4, 7_15_86, 42_34, 6_06_33, 12_62_33, 4_32, 12_38_08, 1_55_92, 11_97, 11_71_32, 12_06_18, 5, 2] @classmethod def lowerCamelCase_ ( cls : Optional[Any] ): '''simple docstring''' lowercase : MaMaaaTokenizer =MaMaaaTokenizer.from_pretrained( cls.checkpoint_name , 
src_lang='''en''' , tgt_lang='''fr''' ) lowercase : Optional[int] =1 return cls def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' self.assertEqual(self.tokenizer.get_lang_id('''ar''' ) , 128006 ) self.assertEqual(self.tokenizer.get_lang_id('''en''' ) , 128022 ) self.assertEqual(self.tokenizer.get_lang_id('''ro''' ) , 128076 ) self.assertEqual(self.tokenizer.get_lang_id('''mr''' ) , 128063 ) def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : List[str] =self.tokenizer.get_vocab() self.assertEqual(len(UpperCAmelCase__ ) , self.tokenizer.vocab_size ) self.assertEqual(vocab['''<unk>'''] , 3 ) self.assertIn(self.tokenizer.get_lang_token('''en''' ) , UpperCAmelCase__ ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : List[Any] ='''en''' lowercase : Optional[Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , UpperCAmelCase__ ) def lowerCamelCase_ ( self : int ): '''simple docstring''' self.assertIn(UpperCAmelCase__ , self.tokenizer.all_special_ids ) # fmt: off lowercase : str =[FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2] # fmt: on lowercase : Optional[Any] =self.tokenizer.decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ ) lowercase : Optional[Any] =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCAmelCase__ ) self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertNotIn(self.tokenizer.eos_token , UpperCAmelCase__ ) def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : Any =tempfile.mkdtemp() lowercase : Tuple =self.tokenizer.lang_token_to_id self.tokenizer.save_pretrained(UpperCAmelCase__ ) lowercase : Union[str, Any] =MaMaaaTokenizer.from_pretrained(UpperCAmelCase__ ) self.assertDictEqual(new_tok.lang_token_to_id , UpperCAmelCase__ ) @require_torch def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' lowercase : List[str] ='''en''' lowercase : int ='''fr''' lowercase : Union[str, Any] =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase__ , return_tensors='''pt''' ) lowercase : str =shift_tokens_right( batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id ) for k in batch: lowercase : int =batch[k].tolist() # batch = {k: v.tolist() for k,v in batch.items()} # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 # batch.decoder_inputs_ids[0][0] == assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == FR_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2] == [2, FR_CODE] @require_torch def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : Optional[int] ='''mr''' self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) lowercase : Union[str, Any] ='''zh''' self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) @require_torch def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : int ='''mr''' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] ) self.assertListEqual(self.tokenizer.suffix_tokens , 
[self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) lowercase : Optional[Any] ='''zh''' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) @require_torch def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : Optional[Any] =self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''' ) self.assertEqual( nested_simplify(UpperCAmelCase__ ) , { # en_XX, A, test, EOS '''input_ids''': [[128022, 58, 4183, 2]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 128006, } , )
92
0
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class lowerCAmelCase__ ( _lowerCamelCase , unittest.TestCase ): A_ : List[str] = ShapEPipeline A_ : Dict = ['prompt'] A_ : Dict = ['prompt'] A_ : Union[str, Any] = [ 'num_images_per_prompt', 'num_inference_steps', 'generator', 'latents', 'guidance_scale', 'frame_size', 'output_type', 'return_dict', ] A_ : List[Any] = False @property def __UpperCamelCase ( self : int ) -> str: return 32 @property def __UpperCamelCase ( self : Tuple ) -> Optional[int]: return 32 @property def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]: return self.time_input_dim * 4 @property def __UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]: return 8 @property def __UpperCamelCase ( self : List[str] ) -> Dict: A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) return tokenizer @property def __UpperCamelCase ( self : Optional[Any] ) -> Dict: torch.manual_seed(0 ) A = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) return CLIPTextModelWithProjection(__UpperCamelCase ) @property def __UpperCamelCase ( self : Dict ) -> List[Any]: torch.manual_seed(0 ) A = { 'num_attention_heads': 2, 'attention_head_dim': 16, 'embedding_dim': self.time_input_dim, 'num_embeddings': 32, 'embedding_proj_dim': self.text_embedder_hidden_size, 'time_embed_dim': self.time_embed_dim, 'num_layers': 1, 'clip_embed_dim': self.time_input_dim * 2, 'additional_embeddings': 0, 'time_embed_act_fn': 'gelu', 'norm_in_type': 'layer', 'encoder_hid_proj_type': None, 'added_emb_type': None, } A = PriorTransformer(**__UpperCamelCase ) return model @property def __UpperCamelCase ( self : Any ) -> str: torch.manual_seed(0 ) A = { 'param_shapes': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), 'd_latent': self.time_input_dim, 'd_hidden': self.renderer_dim, 'n_output': 12, 'background': ( 0.1, 0.1, 0.1, ), } A = ShapERenderer(**__UpperCamelCase ) return model def __UpperCamelCase ( self : List[str] ) -> int: A = self.dummy_prior A = self.dummy_text_encoder A = self.dummy_tokenizer A = self.dummy_renderer A = HeunDiscreteScheduler( beta_schedule='exp' , num_train_timesteps=1_024 , prediction_type='sample' , use_karras_sigmas=__UpperCamelCase , clip_sample=__UpperCamelCase , clip_sample_range=1.0 , ) A = { 'prior': prior, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'renderer': renderer, 'scheduler': scheduler, } return components def __UpperCamelCase ( self : Optional[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Dict=0 ) -> Any: if str(__UpperCamelCase ).startswith('mps' ): A = torch.manual_seed(__UpperCamelCase ) else: A = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase ) A = { 'prompt': 'horse', 'generator': generator, 'num_inference_steps': 1, 'frame_size': 32, 'output_type': 'np', } return inputs def __UpperCamelCase ( self : 
Union[str, Any] ) -> Optional[Any]: A = 'cpu' A = self.get_dummy_components() A = self.pipeline_class(**__UpperCamelCase ) A = pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) A = pipe(**self.get_dummy_inputs(__UpperCamelCase ) ) A = output.images[0] A = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) A = np.array( [ 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __UpperCamelCase ( self : Tuple ) -> Optional[int]: # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def __UpperCamelCase ( self : List[Any] ) -> str: A = torch_device == 'cpu' A = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=__UpperCamelCase , relax_max_difference=__UpperCamelCase , ) def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]: A = self.get_dummy_components() A = self.pipeline_class(**__UpperCamelCase ) A = pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) A = 1 A = 2 A = self.get_dummy_inputs(__UpperCamelCase ) for key in inputs.keys(): if key in self.batch_params: A = batch_size * [inputs[key]] A = pipe(**__UpperCamelCase , num_images_per_prompt=__UpperCamelCase )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): def __UpperCamelCase ( self : str ) -> Tuple: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase ( self : Any ) -> int: A = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/test_shap_e_np_out.npy' ) A = ShapEPipeline.from_pretrained('openai/shap-e' ) A = pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) A = torch.Generator(device=__UpperCamelCase ).manual_seed(0 ) A = pipe( 'a shark' , generator=__UpperCamelCase , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(__UpperCamelCase , __UpperCamelCase )
106
'''simple docstring''' def _lowerCAmelCase ( __magic_name__ : int = 600851475143 ) -> int: try: lowercase : Any =int(__magic_name__ ) except (TypeError, ValueError): raise TypeError('''Parameter n must be int or castable to int.''' ) if n <= 0: raise ValueError('''Parameter n must be greater than or equal to one.''' ) lowercase : Optional[Any] =2 lowercase : Dict =0 if n == 2: return 2 while n > 2: while n % i != 0: i += 1 lowercase : Union[str, Any] =i while n % i == 0: lowercase : Optional[int] =n // i i += 1 return int(__magic_name__ ) if __name__ == "__main__": print(f'''{solution() = }''')
92
0
'''simple docstring''' from __future__ import annotations _UpperCAmelCase : Optional[int] = [] def _SCREAMING_SNAKE_CASE ( __snake_case : list[list[int]] , __snake_case : int , __snake_case : int ): for i in range(len(__snake_case ) ): if board[row][i] == 1: return False for i in range(len(__snake_case ) ): if board[i][column] == 1: return False for i, j in zip(range(__snake_case , -1 , -1 ) , range(__snake_case , -1 , -1 ) ): if board[i][j] == 1: return False for i, j in zip(range(__snake_case , -1 , -1 ) , range(__snake_case , len(__snake_case ) ) ): if board[i][j] == 1: return False return True def _SCREAMING_SNAKE_CASE ( __snake_case : list[list[int]] , __snake_case : int ): if row >= len(__snake_case ): solution.append(__snake_case ) printboard(__snake_case ) print() return True for i in range(len(__snake_case ) ): if is_safe(__snake_case , __snake_case , __snake_case ): _A = 1 solve(__snake_case , row + 1 ) _A = 0 return False def _SCREAMING_SNAKE_CASE ( __snake_case : list[list[int]] ): for i in range(len(__snake_case ) ): for j in range(len(__snake_case ) ): if board[i][j] == 1: print('Q' , end=' ' ) else: print('.' , end=' ' ) print() # n=int(input("The no. of queens")) _UpperCAmelCase : Any = 8 _UpperCAmelCase : Dict = [[0 for i in range(n)] for j in range(n)] solve(board, 0) print('''The total no. of solutions are :''', len(solution))
107
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json""" ), # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2 } class __SCREAMING_SNAKE_CASE ( lowercase__ ): lowerCamelCase_ = 'speech_to_text_2' lowerCamelCase_ = ['past_key_values'] lowerCamelCase_ = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self : int , UpperCAmelCase__ : Dict=10000 , UpperCAmelCase__ : Optional[int]=6 , UpperCAmelCase__ : str=2048 , UpperCAmelCase__ : Optional[Any]=4 , UpperCAmelCase__ : Union[str, Any]=0.0 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Optional[Any]="relu" , UpperCAmelCase__ : List[str]=256 , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : List[Any]=0.0 , UpperCAmelCase__ : Optional[Any]=0.0 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : int=2 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : str=1 , UpperCAmelCase__ : Optional[Any]=0 , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : List[Any]=1024 , **UpperCAmelCase__ : Dict , ): '''simple docstring''' lowercase : List[str] =vocab_size lowercase : Optional[int] =d_model lowercase : Optional[Any] =decoder_ffn_dim lowercase : Any =decoder_layers lowercase : Dict =decoder_attention_heads lowercase : List[Any] =dropout lowercase : List[Any] =attention_dropout lowercase : Any =activation_dropout lowercase : Optional[Any] =activation_function lowercase : Optional[int] =init_std lowercase : Dict =decoder_layerdrop lowercase : Optional[int] =use_cache lowercase : Optional[Any] =decoder_layers lowercase : List[str] =scale_embedding # scale factor will be sqrt(d_model) if True lowercase : str =max_target_positions super().__init__( pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , decoder_start_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , )
92
0
import unittest from .lib import ( Matrix, Vector, axpy, square_zero_matrix, unit_basis_vector, zero_vector, ) class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): '''simple docstring''' def lowerCamelCase ( self : Union[str, Any] ) -> None: """simple docstring""" _UpperCAmelCase = Vector([1, 2, 3] ) self.assertEqual(x.component(0 ) , 1 ) self.assertEqual(x.component(2 ) , 3 ) _UpperCAmelCase = Vector() def lowerCamelCase ( self : Tuple ) -> None: """simple docstring""" _UpperCAmelCase = Vector([0, 0, 0, 0, 0, 1] ) self.assertEqual(str(lowerCamelCase ) , """(0,0,0,0,0,1)""" ) def lowerCamelCase ( self : Optional[int] ) -> None: """simple docstring""" _UpperCAmelCase = Vector([1, 2, 3, 4] ) self.assertEqual(len(lowerCamelCase ) , 4 ) def lowerCamelCase ( self : Tuple ) -> None: """simple docstring""" _UpperCAmelCase = Vector([1, 2] ) _UpperCAmelCase = Vector([1, 2, 3, 4, 5] ) _UpperCAmelCase = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ) _UpperCAmelCase = Vector([1, -1, 1, -1, 2, -3, 4, -5] ) self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 ) self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 ) self.assertEqual(z.euclidean_length() , 0 ) self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 ) def lowerCamelCase ( self : Dict ) -> None: """simple docstring""" _UpperCAmelCase = Vector([1, 2, 3] ) _UpperCAmelCase = Vector([1, 1, 1] ) self.assertEqual((x + y).component(0 ) , 2 ) self.assertEqual((x + y).component(1 ) , 3 ) self.assertEqual((x + y).component(2 ) , 4 ) def lowerCamelCase ( self : Dict ) -> None: """simple docstring""" _UpperCAmelCase = Vector([1, 2, 3] ) _UpperCAmelCase = Vector([1, 1, 1] ) self.assertEqual((x - y).component(0 ) , 0 ) self.assertEqual((x - y).component(1 ) , 1 ) self.assertEqual((x - y).component(2 ) , 2 ) def lowerCamelCase ( self : int ) -> None: """simple docstring""" _UpperCAmelCase = Vector([1, 2, 3] ) _UpperCAmelCase = Vector([2, -1, 4] ) # for test of dot product _UpperCAmelCase = Vector([1, -2, -1] ) self.assertEqual(str(x * 3.0 ) , """(3.0,6.0,9.0)""" ) self.assertEqual((a * b) , 0 ) def lowerCamelCase ( self : Optional[int] ) -> None: """simple docstring""" self.assertEqual(str(zero_vector(10 ) ).count("""0""" ) , 10 ) def lowerCamelCase ( self : Optional[Any] ) -> None: """simple docstring""" self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , """(0,1,0)""" ) def lowerCamelCase ( self : Any ) -> None: """simple docstring""" _UpperCAmelCase = Vector([1, 2, 3] ) _UpperCAmelCase = Vector([1, 0, 1] ) self.assertEqual(str(axpy(2 , lowerCamelCase , lowerCamelCase ) ) , """(3,4,7)""" ) def lowerCamelCase ( self : Union[str, Any] ) -> None: """simple docstring""" _UpperCAmelCase = Vector([1, 0, 0, 0, 0, 0] ) _UpperCAmelCase = x.copy() self.assertEqual(str(lowerCamelCase ) , str(lowerCamelCase ) ) def lowerCamelCase ( self : Tuple ) -> None: """simple docstring""" _UpperCAmelCase = Vector([1, 0, 0] ) x.change_component(0 , 0 ) x.change_component(1 , 1 ) self.assertEqual(str(lowerCamelCase ) , """(0,1,0)""" ) def lowerCamelCase ( self : Union[str, Any] ) -> None: """simple docstring""" _UpperCAmelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(lowerCamelCase ) ) def lowerCamelCase ( self : str ) -> None: """simple docstring""" _UpperCAmelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _UpperCAmelCase = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(minors[x][y] , a.minor(lowerCamelCase , lowerCamelCase ) ) 
def lowerCamelCase ( self : str ) -> None: """simple docstring""" _UpperCAmelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _UpperCAmelCase = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(cofactors[x][y] , a.cofactor(lowerCamelCase , lowerCamelCase ) ) def lowerCamelCase ( self : List[str] ) -> None: """simple docstring""" _UpperCAmelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(-5 , a.determinant() ) def lowerCamelCase ( self : List[str] ) -> None: """simple docstring""" _UpperCAmelCase = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 ) _UpperCAmelCase = Vector([1, 2, 3] ) self.assertEqual("""(14,32,50)""" , str(a * x ) ) self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2 ) ) def lowerCamelCase ( self : Any ) -> None: """simple docstring""" _UpperCAmelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) a.change_component(0 , 2 , 5 ) self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(lowerCamelCase ) ) def lowerCamelCase ( self : int ) -> None: """simple docstring""" _UpperCAmelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(7 , a.component(2 , 1 ) , 0.01 ) def lowerCamelCase ( self : Optional[Any] ) -> None: """simple docstring""" _UpperCAmelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _UpperCAmelCase = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b ) ) def lowerCamelCase ( self : Tuple ) -> None: """simple docstring""" _UpperCAmelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _UpperCAmelCase = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b ) ) def lowerCamelCase ( self : List[str] ) -> None: """simple docstring""" self.assertEqual( """|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5 ) ) , ) if __name__ == "__main__": unittest.main()
108
'''simple docstring''' import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __SCREAMING_SNAKE_CASE : def __init__( self : str , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str]=13 , UpperCAmelCase__ : Any=32 , UpperCAmelCase__ : List[str]=3 , UpperCAmelCase__ : Any=4 , UpperCAmelCase__ : List[str]=[10, 20, 30, 40] , UpperCAmelCase__ : Any=[2, 2, 3, 2] , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Optional[Any]=37 , UpperCAmelCase__ : Union[str, Any]="gelu" , UpperCAmelCase__ : Optional[Any]=10 , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : Optional[int]=["stage2", "stage3", "stage4"] , UpperCAmelCase__ : Dict=[2, 3, 4] , UpperCAmelCase__ : Optional[int]=None , ): '''simple docstring''' lowercase : List[Any] =parent lowercase : Tuple =batch_size lowercase : List[str] =image_size lowercase : List[Any] =num_channels lowercase : Union[str, Any] =num_stages lowercase : int =hidden_sizes lowercase : Any =depths lowercase : Tuple =is_training lowercase : str =use_labels lowercase : List[Any] =intermediate_size lowercase : int =hidden_act lowercase : Union[str, Any] =num_labels lowercase : Optional[int] =initializer_range lowercase : int =out_features lowercase : List[str] =out_indices lowercase : str =scope def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : str =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase : Dict =None if self.use_labels: lowercase : List[Any] =ids_tensor([self.batch_size] , self.num_labels ) lowercase : Dict =self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self : Any ): '''simple docstring''' return ConvNextVaConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[Any] ): '''simple docstring''' lowercase : Dict =ConvNextVaModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : Optional[Any] =model(UpperCAmelCase__ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int ): 
'''simple docstring''' lowercase : Dict =ConvNextVaForImageClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : str =model(UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int ): '''simple docstring''' lowercase : Union[str, Any] =ConvNextVaBackbone(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : Optional[int] =model(UpperCAmelCase__ ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None lowercase : Optional[Any] =None lowercase : str =ConvNextVaBackbone(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : Optional[Any] =model(UpperCAmelCase__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : Any =self.prepare_config_and_inputs() lowercase , lowercase , lowercase : str =config_and_inputs lowercase : Any ={'''pixel_values''': pixel_values} return config, inputs_dict def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : str =self.prepare_config_and_inputs() lowercase , lowercase , lowercase : List[str] =config_and_inputs lowercase : Optional[Any] ={'''pixel_values''': pixel_values, '''labels''': labels} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ): lowerCamelCase_ = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) lowerCamelCase_ = ( {'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification} if is_torch_available() else {} ) lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' lowercase : Dict =ConvNextVaModelTester(self ) lowercase : str =ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37 ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase_ ( self : Any ): '''simple docstring''' return @unittest.skip(reason='''ConvNextV2 does not use inputs_embeds''' ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' pass 
@unittest.skip(reason='''ConvNextV2 does not support input and output embeddings''' ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' pass @unittest.skip(reason='''ConvNextV2 does not use feedforward chunking''' ) def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' pass def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' if not self.model_tester.is_training: return for model_class in self.all_model_classes: lowercase , lowercase : Union[str, Any] =self.model_tester.prepare_config_and_inputs_with_labels() lowercase : Optional[int] =True if model_class.__name__ in [ *get_values(UpperCAmelCase__ ), *get_values(UpperCAmelCase__ ), ]: continue lowercase : Dict =model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.train() lowercase : Optional[Any] =self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ ) lowercase : List[Any] =model(**UpperCAmelCase__ ).loss loss.backward() def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' if not self.model_tester.is_training: return for model_class in self.all_model_classes: lowercase , lowercase : Any =self.model_tester.prepare_config_and_inputs_with_labels() lowercase : List[Any] =False lowercase : Any =True if ( model_class.__name__ in [*get_values(UpperCAmelCase__ ), *get_values(UpperCAmelCase__ )] or not model_class.supports_gradient_checkpointing ): continue lowercase : Any =model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.gradient_checkpointing_enable() model.train() lowercase : Optional[Any] =self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ ) lowercase : int =model(**UpperCAmelCase__ ).loss loss.backward() def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowercase , lowercase : int =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase : Dict =model_class(UpperCAmelCase__ ) lowercase : Union[str, Any] =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase : int =[*signature.parameters.keys()] lowercase : Optional[Any] =['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCAmelCase__ ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' lowercase : Tuple =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : int ): '''simple docstring''' def check_hidden_states_output(UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple ): lowercase : int =model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() with torch.no_grad(): lowercase : Any =model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) ) lowercase : Dict =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowercase : List[Any] =self.model_tester.num_stages self.assertEqual(len(UpperCAmelCase__ ) , expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowercase , lowercase : List[str] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase : List[str] =True check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) # check that 
output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase : Tuple =True check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' lowercase : str =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ ) @slow def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase : List[Any] =ConvNextVaModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) def _lowerCAmelCase ( ) -> List[Any]: lowercase : Union[str, Any] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' return AutoImageProcessor.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ) if is_vision_available() else None @slow def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' lowercase : Tuple =ConvNextVaForImageClassification.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ).to(UpperCAmelCase__ ) lowercase : int =self.default_image_processor lowercase : List[str] =prepare_img() lowercase : List[Any] =preprocessor(images=UpperCAmelCase__ , return_tensors='''pt''' ).to(UpperCAmelCase__ ) # forward pass with torch.no_grad(): lowercase : Dict =model(**UpperCAmelCase__ ) # verify the logits lowercase : Optional[Any] =torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , UpperCAmelCase__ ) lowercase : Tuple =torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(UpperCAmelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1E-4 ) )
92
0
'''simple docstring''' import unittest from transformers.testing_utils import CaptureStdout from transformers.tools.python_interpreter import evaluate def __magic_name__ ( __UpperCAmelCase ) -> str: '''simple docstring''' return x + 2 class __a ( unittest.TestCase ): def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = """x = 3""" __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = evaluate(lowerCamelCase ,{} ,state=lowerCamelCase ) assert result == 3 self.assertDictEqual(lowerCamelCase ,{"""x""": 3} ) __SCREAMING_SNAKE_CASE = """x = y""" __SCREAMING_SNAKE_CASE = {"""y""": 5} __SCREAMING_SNAKE_CASE = evaluate(lowerCamelCase ,{} ,state=lowerCamelCase ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(lowerCamelCase ,{"""x""": 5, """y""": 5} ) def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = """y = add_two(x)""" __SCREAMING_SNAKE_CASE = {"""x""": 3} __SCREAMING_SNAKE_CASE = evaluate(lowerCamelCase ,{"""add_two""": add_two} ,state=lowerCamelCase ) assert result == 5 self.assertDictEqual(lowerCamelCase ,{"""x""": 3, """y""": 5} ) # Won't work without the tool with CaptureStdout() as out: __SCREAMING_SNAKE_CASE = evaluate(lowerCamelCase ,{} ,state=lowerCamelCase ) assert result is None assert "tried to execute add_two" in out.out def UpperCAmelCase__ ( self : Any ): '''simple docstring''' __SCREAMING_SNAKE_CASE = """x = 3""" __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = evaluate(lowerCamelCase ,{} ,state=lowerCamelCase ) assert result == 3 self.assertDictEqual(lowerCamelCase ,{"""x""": 3} ) def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = """test_dict = {'x': x, 'y': add_two(x)}""" __SCREAMING_SNAKE_CASE = {"""x""": 3} __SCREAMING_SNAKE_CASE = evaluate(lowerCamelCase ,{"""add_two""": add_two} ,state=lowerCamelCase ) self.assertDictEqual(lowerCamelCase ,{"""x""": 3, """y""": 5} ) self.assertDictEqual(lowerCamelCase ,{"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} ) def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' __SCREAMING_SNAKE_CASE = """x = 3\ny = 5""" __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = evaluate(lowerCamelCase ,{} ,state=lowerCamelCase ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(lowerCamelCase ,{"""x""": 3, """y""": 5} ) def UpperCAmelCase__ ( self : int ): '''simple docstring''' __SCREAMING_SNAKE_CASE = """text = f'This is x: {x}.'""" __SCREAMING_SNAKE_CASE = {"""x""": 3} __SCREAMING_SNAKE_CASE = evaluate(lowerCamelCase ,{} ,state=lowerCamelCase ) # evaluate returns the value of the last assignment. assert result == "This is x: 3." self.assertDictEqual(lowerCamelCase ,{"""x""": 3, """text""": """This is x: 3."""} ) def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = """if x <= 3:\n y = 2\nelse:\n y = 5""" __SCREAMING_SNAKE_CASE = {"""x""": 3} __SCREAMING_SNAKE_CASE = evaluate(lowerCamelCase ,{} ,state=lowerCamelCase ) # evaluate returns the value of the last assignment. assert result == 2 self.assertDictEqual(lowerCamelCase ,{"""x""": 3, """y""": 2} ) __SCREAMING_SNAKE_CASE = {"""x""": 8} __SCREAMING_SNAKE_CASE = evaluate(lowerCamelCase ,{} ,state=lowerCamelCase ) # evaluate returns the value of the last assignment. 
assert result == 5 self.assertDictEqual(lowerCamelCase ,{"""x""": 8, """y""": 5} ) def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' __SCREAMING_SNAKE_CASE = """test_list = [x, add_two(x)]""" __SCREAMING_SNAKE_CASE = {"""x""": 3} __SCREAMING_SNAKE_CASE = evaluate(lowerCamelCase ,{"""add_two""": add_two} ,state=lowerCamelCase ) self.assertListEqual(lowerCamelCase ,[3, 5] ) self.assertDictEqual(lowerCamelCase ,{"""x""": 3, """test_list""": [3, 5]} ) def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = """y = x""" __SCREAMING_SNAKE_CASE = {"""x""": 3} __SCREAMING_SNAKE_CASE = evaluate(lowerCamelCase ,{} ,state=lowerCamelCase ) assert result == 3 self.assertDictEqual(lowerCamelCase ,{"""x""": 3, """y""": 3} ) def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' __SCREAMING_SNAKE_CASE = """test_list = [x, add_two(x)]\ntest_list[1]""" __SCREAMING_SNAKE_CASE = {"""x""": 3} __SCREAMING_SNAKE_CASE = evaluate(lowerCamelCase ,{"""add_two""": add_two} ,state=lowerCamelCase ) assert result == 5 self.assertDictEqual(lowerCamelCase ,{"""x""": 3, """test_list""": [3, 5]} ) __SCREAMING_SNAKE_CASE = """test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']""" __SCREAMING_SNAKE_CASE = {"""x""": 3} __SCREAMING_SNAKE_CASE = evaluate(lowerCamelCase ,{"""add_two""": add_two} ,state=lowerCamelCase ) assert result == 5 self.assertDictEqual(lowerCamelCase ,{"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} ) def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = """x = 0\nfor i in range(3):\n x = i""" __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = evaluate(lowerCamelCase ,{"""range""": range} ,state=lowerCamelCase ) assert result == 2 self.assertDictEqual(lowerCamelCase ,{"""x""": 2, """i""": 2} )
109
"""Utilities for building PartitionSpecs for model-parallel Flax models."""
import re

from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P


# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if the regexes in `qs` match a consecutive window of strings in tuple `ks`."""
    # compile regexes and force complete match
    qts = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    # start from an "unmatched" sentinel for every flattened parameter key
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
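# --- Usage sketch (not part of the original module) ---------------------------
# A minimal, hypothetical example of applying `set_partitions` above to a toy
# GPT-2-style parameter tree. The key names and tensor shapes below are made up
# for illustration, and this assumes a JAX version where the module's
# `jax.experimental.PartitionSpec` import still resolves.
import jax.numpy as jnp

toy_params = {
    "transformer": {
        "wte": {"embedding": jnp.zeros((100, 16))},
        "h": {
            "0": {
                "attention": {
                    "out_proj": {"kernel": jnp.zeros((16, 16)), "bias": jnp.zeros((16,))},
                },
            },
        },
    },
}

# Every leaf is assigned either a PartitionSpec over the "mp" mesh axis or None
# (replicated); the assert in set_partitions fails if any parameter is unmatched.
toy_specs = set_partitions(toy_params)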
92
0
"""simple docstring""" def lowerCamelCase ( _snake_case ,_snake_case ): UpperCAmelCase__ : Union[str, Any] = len(_snake_case ) UpperCAmelCase__ : Dict = len(_snake_case ) UpperCAmelCase__ : str = [[False for _ in range(m + 1 )] for _ in range(n + 1 )] UpperCAmelCase__ : Any = True for i in range(_snake_case ): for j in range(m + 1 ): if dp[i][j]: if j < m and a[i].upper() == b[j]: UpperCAmelCase__ : Union[str, Any] = True if a[i].islower(): UpperCAmelCase__ : Any = True return dp[n][m] if __name__ == "__main__": import doctest doctest.testmod()
110
"""
Even Tree problem: given a tree with an even number of vertices, find the maximum
number of edges that can be removed so that every remaining component contains an
even number of vertices.
"""
from collections import defaultdict


def dfs(start: int) -> int:
    """Return the size of the subtree rooted at ``start``, recording even-sized subtrees."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    # an even-sized subtree can be cut away from its parent
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree():
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited = {}
    cuts = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    # the root is always recorded as a "cut", hence the -1; this sample prints 2
    print(len(cuts) - 1)
92
0
import unittest from transformers import BertGenerationTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin a : Optional[int] = '''▁''' a : List[Any] = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece class lowerCamelCase_ ( lowercase__ , unittest.TestCase ): '''simple docstring''' __UpperCAmelCase = BertGenerationTokenizer __UpperCAmelCase = False __UpperCAmelCase = True def A ( self ) -> Union[str, Any]: '''simple docstring''' super().setUp() __lowercase = BertGenerationTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) def A ( self ) -> Optional[Any]: '''simple docstring''' __lowercase = '''<s>''' __lowercase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ ) def A ( self ) -> List[Any]: '''simple docstring''' __lowercase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<unk>''' ) self.assertEqual(vocab_keys[1] , '''<s>''' ) self.assertEqual(vocab_keys[-1] , '''<pad>''' ) self.assertEqual(len(UpperCAmelCase__ ) , 1_0_0_2 ) def A ( self ) -> Any: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 ) def A ( self ) -> Any: '''simple docstring''' __lowercase = BertGenerationTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ ) __lowercase = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , ) __lowercase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( UpperCAmelCase__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) __lowercase = tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) self.assertListEqual( UpperCAmelCase__ , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , ) __lowercase = tokenizer.convert_ids_to_tokens(UpperCAmelCase__ ) self.assertListEqual( UpperCAmelCase__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) @cached_property def A ( self ) -> Dict: '''simple docstring''' return BertGenerationTokenizer.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' ) @slow def A ( self ) -> Union[str, Any]: '''simple docstring''' __lowercase = '''Hello World!''' __lowercase = [1_8_5_3_6, 2_2_6_0, 1_0_1] self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) ) @slow def A ( self ) -> List[str]: '''simple docstring''' __lowercase = ( 
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) __lowercase = [ 8_7_1, 4_1_9, 3_5_8, 9_4_6, 9_9_1, 2_5_2_1, 4_5_2, 3_5_8, 1_3_5_7, 3_8_7, 7_7_5_1, 3_5_3_6, 1_1_2, 9_8_5, 4_5_6, 1_2_6, 8_6_5, 9_3_8, 5_4_0_0, 5_7_3_4, 4_5_8, 1_3_6_8, 4_6_7, 7_8_6, 2_4_6_2, 5_2_4_6, 1_1_5_9, 6_3_3, 8_6_5, 4_5_1_9, 4_5_7, 5_8_2, 8_5_2, 2_5_5_7, 4_2_7, 9_1_6, 5_0_8, 4_0_5, 3_4_3_2_4, 4_9_7, 3_9_1, 4_0_8, 1_1_3_4_2, 1_2_4_4, 3_8_5, 1_0_0, 9_3_8, 9_8_5, 4_5_6, 5_7_4, 3_6_2, 1_2_5_9_7, 3_2_0_0, 3_1_2_9, 1_1_7_2, ] self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) ) @require_torch @slow def A ( self ) -> Optional[Any]: '''simple docstring''' import torch from transformers import BertGenerationConfig, BertGenerationEncoder # Build sequence __lowercase = list(self.big_tokenizer.get_vocab().keys() )[:1_0] __lowercase = ''' '''.join(UpperCAmelCase__ ) __lowercase = self.big_tokenizer.encode_plus(UpperCAmelCase__ , return_tensors='''pt''' , return_token_type_ids=UpperCAmelCase__ ) __lowercase = self.big_tokenizer.batch_encode_plus( [sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=UpperCAmelCase__ ) __lowercase = BertGenerationConfig() __lowercase = BertGenerationEncoder(UpperCAmelCase__ ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**UpperCAmelCase__ ) model(**UpperCAmelCase__ ) @slow def A ( self ) -> Dict: '''simple docstring''' __lowercase = {'''input_ids''': [[3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4], [4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase__ , model_name='''google/bert_for_seq_generation_L-24_bbc_encoder''' , revision='''c817d1fd1be2ffa69431227a1fe320544943d4db''' , )
639
"""Utilities to convert a PyTorch state dict into Flax parameters."""
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to corresponding Flax weight names and reshape the tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
92
0
from typing import Union

import fire
import torch
from tqdm import tqdm


def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Load a saved PyTorch state dict, cast every tensor to fp16 and save it again."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)


if __name__ == "__main__":
    fire.Fire(convert)
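# --- Usage sketch (not part of the original script) ----------------------------
# With `fire`, the `convert` function above doubles as a small CLI; assuming the
# file were saved as convert_model_to_fp16.py (a hypothetical name), it could be
# invoked as:
#
#   python convert_model_to_fp16.py --src_path pytorch_model.bin --save_path pytorch_model_fp16.bin
#
# The round-trip below checks the same behaviour directly from Python on a toy
# state dict written to a temporary directory.
import os
import tempfile

import torch

tmp_dir = tempfile.mkdtemp()
src = os.path.join(tmp_dir, "toy_model.bin")
dst = os.path.join(tmp_dir, "toy_model_fp16.bin")

torch.save({"weight": torch.randn(4, 4), "bias": torch.zeros(4)}, src)
convert(src, map_location="cpu", save_path=dst)

converted = torch.load(dst)
assert all(v.dtype == torch.float16 for v in converted.values())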
220
'''simple docstring''' from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. UpperCamelCase_ = 200 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. UpperCamelCase_ = 50 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. UpperCamelCase_ = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 1000)) def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : str ) -> tuple[str, float]: lowercase : int =len([g for position, g in enumerate(__magic_name__ ) if g == main_target[position]] ) return (item, float(__magic_name__ )) def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : str ) -> tuple[str, str]: lowercase : Any =random.randint(0 , len(__magic_name__ ) - 1 ) lowercase : Tuple =parent_a[:random_slice] + parent_a[random_slice:] lowercase : List[str] =parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : list[str] ) -> str: lowercase : Union[str, Any] =list(__magic_name__ ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: lowercase : Dict =random.choice(__magic_name__ ) return "".join(__magic_name__ ) def _lowerCAmelCase ( __magic_name__ : tuple[str, float] , __magic_name__ : list[tuple[str, float]] , __magic_name__ : list[str] , ) -> list[str]: lowercase : Any =[] # Generate more children proportionally to the fitness score. lowercase : Dict =int(parent_a[1] * 100 ) + 1 lowercase : List[str] =10 if child_n >= 10 else child_n for _ in range(__magic_name__ ): lowercase : List[str] =population_score[random.randint(0 , __magic_name__ )][0] lowercase , lowercase : Dict =crossover(parent_a[0] , __magic_name__ ) # Append new string to the population list. pop.append(mutate(__magic_name__ , __magic_name__ ) ) pop.append(mutate(__magic_name__ , __magic_name__ ) ) return pop def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : list[str] , __magic_name__ : bool = True ) -> tuple[int, int, str]: # Verify if N_POPULATION is bigger than N_SELECTED if N_POPULATION < N_SELECTED: lowercase : List[str] =f'''{N_POPULATION} must be bigger than {N_SELECTED}''' raise ValueError(__magic_name__ ) # Verify that the target contains no genes besides the ones inside genes variable. lowercase : Optional[int] =sorted({c for c in target if c not in genes} ) if not_in_genes_list: lowercase : Dict =f'''{not_in_genes_list} is not in genes list, evolution cannot converge''' raise ValueError(__magic_name__ ) # Generate random starting population. lowercase : int =[] for _ in range(__magic_name__ ): population.append(''''''.join([random.choice(__magic_name__ ) for i in range(len(__magic_name__ ) )] ) ) # Just some logs to know what the algorithms is doing. lowercase , lowercase : Optional[int] =0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(__magic_name__ ) # Random population created. Now it's time to evaluate. 
# Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. lowercase : List[str] =[evaluate(__magic_name__ , __magic_name__ ) for item in population] # Check if there is a matching evolution. lowercase : int =sorted(__magic_name__ , key=lambda __magic_name__ : x[1] , reverse=__magic_name__ ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( f'''\nGeneration: {generation}''' f'''\nTotal Population:{total_population}''' f'''\nBest score: {population_score[0][1]}''' f'''\nBest string: {population_score[0][0]}''' ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. lowercase : Any =population[: int(N_POPULATION / 3 )] population.clear() population.extend(__magic_name__ ) # Normalize population score to be between 0 and 1. lowercase : Dict =[ (item, score / len(__magic_name__ )) for item, score in population_score ] # This is selection for i in range(__magic_name__ ): population.extend(select(population_score[int(__magic_name__ )] , __magic_name__ , __magic_name__ ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. if len(__magic_name__ ) > N_POPULATION: break if __name__ == "__main__": UpperCamelCase_ = ( """This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!""" ) UpperCamelCase_ = list( """ ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm""" """nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\""" ) UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = basic(target_str, genes_list) print( f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}''' )
92
0
import json import os import unittest from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class SCREAMING_SNAKE_CASE_ ( lowercase__ , unittest.TestCase ): """simple docstring""" __lowerCAmelCase : Optional[Any] =GPTaTokenizer __lowerCAmelCase : Optional[int] =GPTaTokenizerFast __lowerCAmelCase : Optional[Any] =True __lowerCAmelCase : Any ={'''add_prefix_space''': True} __lowerCAmelCase : str =False def UpperCamelCase__ ( self :Tuple): """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt _lowercase =[ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', '''<|endoftext|>''', ] _lowercase =dict(zip(UpperCAmelCase__, range(len(UpperCAmelCase__)))) _lowercase =['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] _lowercase ={'''unk_token''': '''<unk>'''} _lowercase =os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file']) _lowercase =os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file']) with open(self.vocab_file, 'w', encoding='utf-8') as fp: fp.write(json.dumps(UpperCAmelCase__) + '\n') with open(self.merges_file, 'w', encoding='utf-8') as fp: fp.write('\n'.join(UpperCAmelCase__)) def UpperCamelCase__ ( self :str, **snake_case :Dict): """simple docstring""" kwargs.update(self.special_tokens_map) return GPTaTokenizer.from_pretrained(self.tmpdirname, **UpperCAmelCase__) def UpperCamelCase__ ( self :str, **snake_case :List[Any]): """simple docstring""" kwargs.update(self.special_tokens_map) return GPTaTokenizerFast.from_pretrained(self.tmpdirname, **UpperCAmelCase__) def UpperCamelCase__ ( self :Any, snake_case :Tuple): """simple docstring""" _lowercase ='''lower newer''' _lowercase ='''lower newer''' return input_text, output_text def UpperCamelCase__ ( self :Dict): """simple docstring""" _lowercase =GPTaTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map) _lowercase ='''lower newer''' _lowercase =['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] _lowercase =tokenizer.tokenize(UpperCAmelCase__, add_prefix_space=UpperCAmelCase__) self.assertListEqual(UpperCAmelCase__, UpperCAmelCase__) _lowercase =tokens + [tokenizer.unk_token] _lowercase =[14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__), UpperCAmelCase__) def UpperCamelCase__ ( self :Union[str, Any]): """simple docstring""" if not self.test_rust_tokenizer: return _lowercase =self.get_tokenizer() _lowercase =self.get_rust_tokenizer(add_prefix_space=UpperCAmelCase__) _lowercase ='''lower newer''' # Testing tokenization _lowercase =tokenizer.tokenize(UpperCAmelCase__, add_prefix_space=UpperCAmelCase__) _lowercase =rust_tokenizer.tokenize(UpperCAmelCase__) self.assertListEqual(UpperCAmelCase__, UpperCAmelCase__) # Testing conversion to ids without special tokens _lowercase =tokenizer.encode(UpperCAmelCase__, add_special_tokens=UpperCAmelCase__, add_prefix_space=UpperCAmelCase__) _lowercase =rust_tokenizer.encode(UpperCAmelCase__, add_special_tokens=UpperCAmelCase__) self.assertListEqual(UpperCAmelCase__, 
UpperCAmelCase__) # Testing conversion to ids with special tokens _lowercase =self.get_rust_tokenizer(add_prefix_space=UpperCAmelCase__) _lowercase =tokenizer.encode(UpperCAmelCase__, add_prefix_space=UpperCAmelCase__) _lowercase =rust_tokenizer.encode(UpperCAmelCase__) self.assertListEqual(UpperCAmelCase__, UpperCAmelCase__) # Testing the unknown token _lowercase =tokens + [rust_tokenizer.unk_token] _lowercase =[14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(UpperCAmelCase__), UpperCAmelCase__) def UpperCamelCase__ ( self :Tuple, *snake_case :List[str], **snake_case :int): """simple docstring""" pass def UpperCamelCase__ ( self :Any, snake_case :List[Any]=15): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})'''): _lowercase =self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__, **UpperCAmelCase__) # Simple input _lowercase ='''This is a simple input''' _lowercase =['''This is a simple input 1''', '''This is a simple input 2'''] _lowercase =('''This is a simple input''', '''This is a pair''') _lowercase =[ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests self.assertRaises(UpperCAmelCase__, tokenizer_r.encode, UpperCAmelCase__, max_length=UpperCAmelCase__, padding='max_length') # Simple input self.assertRaises(UpperCAmelCase__, tokenizer_r.encode_plus, UpperCAmelCase__, max_length=UpperCAmelCase__, padding='max_length') # Simple input self.assertRaises( UpperCAmelCase__, tokenizer_r.batch_encode_plus, UpperCAmelCase__, max_length=UpperCAmelCase__, padding='max_length', ) # Pair input self.assertRaises(UpperCAmelCase__, tokenizer_r.encode, UpperCAmelCase__, max_length=UpperCAmelCase__, padding='max_length') # Pair input self.assertRaises(UpperCAmelCase__, tokenizer_r.encode_plus, UpperCAmelCase__, max_length=UpperCAmelCase__, padding='max_length') # Pair input self.assertRaises( UpperCAmelCase__, tokenizer_r.batch_encode_plus, UpperCAmelCase__, max_length=UpperCAmelCase__, padding='max_length', ) def UpperCamelCase__ ( self :str): """simple docstring""" _lowercase =GPTaTokenizer.from_pretrained(self.tmpdirname, pad_token='<pad>') # Simple input _lowercase ='''This is a simple input''' _lowercase =['''This is a simple input looooooooong''', '''This is a simple input'''] _lowercase =('''This is a simple input''', '''This is a pair''') _lowercase =[ ('''This is a simple input loooooong''', '''This is a simple input'''), ('''This is a simple pair loooooong''', '''This is a simple pair'''), ] _lowercase =tokenizer.pad_token_id _lowercase =tokenizer(UpperCAmelCase__, padding='max_length', max_length=30, return_tensors='np') _lowercase =tokenizer(UpperCAmelCase__, padding=UpperCAmelCase__, truncate=UpperCAmelCase__, return_tensors='np') _lowercase =tokenizer(*UpperCAmelCase__, padding='max_length', max_length=60, return_tensors='np') _lowercase =tokenizer(UpperCAmelCase__, padding=UpperCAmelCase__, truncate=UpperCAmelCase__, return_tensors='np') # s # test single string max_length padding self.assertEqual(out_s['input_ids'].shape[-1], 30) self.assertTrue(pad_token_id in out_s['input_ids']) self.assertTrue(0 in out_s['attention_mask']) # s2 # test automatic padding self.assertEqual(out_sa['input_ids'].shape[-1], 33) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa['input_ids'][0]) self.assertFalse(0 in 
out_sa['attention_mask'][0]) # short slice does have padding self.assertTrue(pad_token_id in out_sa['input_ids'][1]) self.assertTrue(0 in out_sa['attention_mask'][1]) # p # test single pair max_length padding self.assertEqual(out_p['input_ids'].shape[-1], 60) self.assertTrue(pad_token_id in out_p['input_ids']) self.assertTrue(0 in out_p['attention_mask']) # p2 # test automatic padding pair self.assertEqual(out_pa['input_ids'].shape[-1], 52) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa['input_ids'][0]) self.assertFalse(0 in out_pa['attention_mask'][0]) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa['input_ids'][1]) self.assertTrue(0 in out_pa['attention_mask'][1]) def UpperCamelCase__ ( self :Union[str, Any]): """simple docstring""" _lowercase ='''$$$''' _lowercase =GPTaTokenizer.from_pretrained(self.tmpdirname, bos_token=UpperCAmelCase__, add_bos_token=UpperCAmelCase__) _lowercase ='''This is a simple input''' _lowercase =['''This is a simple input 1''', '''This is a simple input 2'''] _lowercase =tokenizer.bos_token_id _lowercase =tokenizer(UpperCAmelCase__) _lowercase =tokenizer(UpperCAmelCase__) self.assertEqual(out_s.input_ids[0], UpperCAmelCase__) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids)) _lowercase =tokenizer.decode(out_s.input_ids) _lowercase =tokenizer.batch_decode(out_sa.input_ids) self.assertEqual(decode_s.split()[0], UpperCAmelCase__) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa)) def UpperCamelCase__ ( self :Dict): """simple docstring""" pass def UpperCamelCase__ ( self :Optional[Any]): """simple docstring""" _lowercase =[self.get_tokenizer(do_lower_case=UpperCAmelCase__, add_bos_token=UpperCAmelCase__)] for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}'''): _lowercase ='''Encode this.''' _lowercase ='''This one too please.''' _lowercase =tokenizer.encode(UpperCAmelCase__, add_special_tokens=UpperCAmelCase__) encoded_sequence += tokenizer.encode(UpperCAmelCase__, add_special_tokens=UpperCAmelCase__) _lowercase =tokenizer.encode_plus( UpperCAmelCase__, UpperCAmelCase__, add_special_tokens=UpperCAmelCase__, return_special_tokens_mask=UpperCAmelCase__, ) _lowercase =encoded_sequence_dict['''input_ids'''] _lowercase =encoded_sequence_dict['''special_tokens_mask'''] self.assertEqual(len(UpperCAmelCase__), len(UpperCAmelCase__)) _lowercase =[ (x if not special_tokens_mask[i] else None) for i, x in enumerate(UpperCAmelCase__) ] _lowercase =[x for x in filtered_sequence if x is not None] self.assertEqual(UpperCAmelCase__, UpperCAmelCase__) @require_tokenizers class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): """simple docstring""" def UpperCamelCase__ ( self :Dict): """simple docstring""" _lowercase =AutoTokenizer.from_pretrained('facebook/opt-350m', from_slow=UpperCAmelCase__) _lowercase ='''A photo of a cat''' _lowercase =tokenizer.encode( UpperCAmelCase__, ) self.assertEqual(UpperCAmelCase__, [2, 250, 1345, 9, 10, 4758]) tokenizer.save_pretrained('test_opt') _lowercase =AutoTokenizer.from_pretrained('./test_opt') _lowercase =tokenizer.encode( UpperCAmelCase__, ) self.assertEqual(UpperCAmelCase__, [2, 250, 1345, 9, 10, 4758]) def UpperCamelCase__ ( self :List[str]): """simple docstring""" _lowercase =AutoTokenizer.from_pretrained('facebook/opt-350m', use_slow=UpperCAmelCase__) _lowercase ='''A photo of a cat''' _lowercase =tokenizer.encode( UpperCAmelCase__, ) # Same as above self.assertEqual(UpperCAmelCase__, [2, 250, 1345, 9, 10, 4758]) 
@unittest.skip('This test is failing because of a bug in the fast tokenizer') def UpperCamelCase__ ( self :Optional[Any]): """simple docstring""" _lowercase =AutoTokenizer.from_pretrained('facebook/opt-350m', from_slow=UpperCAmelCase__) _lowercase ='''bos''' _lowercase =tokenizer.get_vocab()['''bos'''] _lowercase ='''A photo of a cat''' _lowercase =tokenizer.encode( UpperCAmelCase__, ) # We changed the bos token self.assertEqual(UpperCAmelCase__, [3_1957, 250, 1345, 9, 10, 4758]) tokenizer.save_pretrained('./tok') _lowercase =AutoTokenizer.from_pretrained('./tok') self.assertTrue(tokenizer.is_fast) _lowercase =tokenizer.encode( UpperCAmelCase__, ) self.assertEqual(UpperCAmelCase__, [3_1957, 250, 1345, 9, 10, 4758])
181
"""XNLI benchmark metric."""
import datasets


_CITATION = """\
@InProceedings{conneau2018xnli,
  author = \"Conneau, Alexis and Rinott, Ruty and Lample, Guillaume and Williams, Adina and Bowman, Samuel R. and Schwenk, Holger and Stoyanov, Veselin\",
  title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",
  booktitle = \"Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing\",
  year = \"2018\",
  publisher = \"Association for Computational Linguistics\",
  location = \"Brussels, Belgium\",
}
"""

_DESCRIPTION = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three labels).
"""

_KWARGS_DESCRIPTION = """
Computes XNLI score which is just simple accuracy.
Args:
    predictions: Predicted labels.
    references: Ground truth labels.
Returns:
    'accuracy': accuracy
Examples:

    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> xnli_metric = datasets.load_metric(\"xnli\")
    >>> results = xnli_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}
"""


def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
92
0
"""simple docstring""" import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow __UpperCamelCase : Tuple = logging.getLogger() @unittest.skip('''Temporarily disable the doc tests.''' ) @require_torch @require_tf @slow class a ( unittest.TestCase ): def UpperCamelCase__ ( self , _snake_case , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = True , ): """simple docstring""" lowerCAmelCase = [file for file in os.listdir(UpperCAmelCase__ ) if os.path.isfile(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) )] if identifier is not None: lowerCAmelCase = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): for n_ in n_identifier: lowerCAmelCase = [file for file in files if n_ not in file] else: lowerCAmelCase = [file for file in files if n_identifier not in file] lowerCAmelCase = ignore_files or [] ignore_files.append('__init__.py' ) lowerCAmelCase = [file for file in files if file not in ignore_files] for file in files: # Open all files print('Testing' , UpperCAmelCase__ ) if only_modules: lowerCAmelCase = file.split('.' )[0] try: lowerCAmelCase = getattr(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = doctest.DocTestSuite(UpperCAmelCase__ ) lowerCAmelCase = unittest.TextTestRunner().run(UpperCAmelCase__ ) self.assertIs(len(result.failures ) , 0 ) except AttributeError: logger.info(F'{module_identifier} is not a module.' ) else: lowerCAmelCase = doctest.testfile(str('..' / directory / file ) , optionflags=doctest.ELLIPSIS ) self.assertIs(result.failed , 0 ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = Path('src/transformers' ) lowerCAmelCase = '''modeling''' lowerCAmelCase = [ '''modeling_ctrl.py''', '''modeling_tf_ctrl.py''', ] self.analyze_directory(UpperCAmelCase__ , identifier=UpperCAmelCase__ , ignore_files=UpperCAmelCase__ ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = Path('src/transformers' ) lowerCAmelCase = '''tokenization''' self.analyze_directory(UpperCAmelCase__ , identifier=UpperCAmelCase__ ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = Path('src/transformers' ) lowerCAmelCase = '''configuration''' self.analyze_directory(UpperCAmelCase__ , identifier=UpperCAmelCase__ ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = Path('src/transformers' ) lowerCAmelCase = ['''configuration''', '''modeling''', '''tokenization'''] self.analyze_directory(UpperCAmelCase__ , n_identifier=UpperCAmelCase__ ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = Path('docs/source' ) lowerCAmelCase = ['''favicon.ico'''] self.analyze_directory(UpperCAmelCase__ , ignore_files=UpperCAmelCase__ , only_modules=UpperCAmelCase__ )
4
'''simple docstring''' from __future__ import annotations import random import unittest from transformers import TransfoXLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLModel, ) class __SCREAMING_SNAKE_CASE : def __init__( self : List[str] , UpperCAmelCase__ : Dict , ): '''simple docstring''' lowercase : Any =parent lowercase : Optional[int] =13 lowercase : Union[str, Any] =7 lowercase : str =30 lowercase : Optional[int] =self.seq_length + self.mem_len lowercase : Dict =15 lowercase : List[str] =True lowercase : Optional[int] =True lowercase : Tuple =99 lowercase : str =[10, 50, 80] lowercase : List[Any] =32 lowercase : Optional[int] =32 lowercase : int =4 lowercase : Any =8 lowercase : List[Any] =128 lowercase : List[str] =2 lowercase : Tuple =2 lowercase : int =None lowercase : Optional[int] =1 lowercase : int =0 lowercase : List[str] =3 lowercase : str =self.vocab_size - 1 lowercase : Tuple =0.01 def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : str =None if self.use_labels: lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : Union[str, Any] =TransfoXLConfig( vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , ) return (config, input_ids_a, input_ids_a, lm_labels) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' random.seed(self.seed ) tf.random.set_seed(self.seed ) def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] ): '''simple docstring''' lowercase : Tuple =TFTransfoXLModel(UpperCAmelCase__ ) lowercase , lowercase : Optional[Any] =model(UpperCAmelCase__ ).to_tuple() lowercase : List[str] ={'''input_ids''': input_ids_a, '''mems''': mems_a} lowercase , lowercase : Any =model(UpperCAmelCase__ ).to_tuple() self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def lowerCamelCase_ ( self : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int ): '''simple docstring''' lowercase : int =TFTransfoXLLMHeadModel(UpperCAmelCase__ ) lowercase , lowercase : Tuple =model(UpperCAmelCase__ 
).to_tuple() lowercase : Optional[Any] ={'''input_ids''': input_ids_a, '''labels''': lm_labels} lowercase , lowercase : Optional[int] =model(UpperCAmelCase__ ).to_tuple() lowercase , lowercase : List[str] =model([input_ids_a, mems_a] ).to_tuple() lowercase : int ={'''input_ids''': input_ids_a, '''mems''': mems_a, '''labels''': lm_labels} lowercase , lowercase : str =model(UpperCAmelCase__ ).to_tuple() self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def lowerCamelCase_ ( self : str , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] ): '''simple docstring''' lowercase : Optional[int] =TFTransfoXLForSequenceClassification(UpperCAmelCase__ ) lowercase : Union[str, Any] =model(UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' lowercase : List[Any] =self.prepare_config_and_inputs() ((lowercase) , (lowercase) , (lowercase) , (lowercase)) : Optional[Any] =config_and_inputs lowercase : Union[str, Any] ={'''input_ids''': input_ids_a} return config, inputs_dict @require_tf class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ): lowerCamelCase_ = ( (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () ) lowerCamelCase_ = () if is_tf_available() else () lowerCamelCase_ = ( { 'feature-extraction': TFTransfoXLModel, 'text-classification': TFTransfoXLForSequenceClassification, 'text-generation': TFTransfoXLLMHeadModel, 'zero-shot': TFTransfoXLForSequenceClassification, } if is_tf_available() else {} ) # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int ): '''simple docstring''' if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. 
return True return False def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : Any =TFTransfoXLModelTester(self ) lowercase : Optional[int] =ConfigTester(self , config_class=UpperCAmelCase__ , d_embed=37 ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' self.model_tester.set_seed() lowercase : List[str] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_model(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' self.model_tester.set_seed() lowercase : Any =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' lowercase , lowercase : Tuple =self.model_tester.prepare_config_and_inputs_for_common() lowercase : int =[TFTransfoXLForSequenceClassification] for model_class in self.all_model_classes: lowercase : str =model_class(UpperCAmelCase__ ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class in list_other_models_with_output_ebd: lowercase : Union[str, Any] =model.get_output_embeddings() assert isinstance(UpperCAmelCase__ , tf.keras.layers.Layer ) lowercase : Any =model.get_bias() assert name is None else: lowercase : Optional[int] =model.get_output_embeddings() assert x is None lowercase : Optional[int] =model.get_bias() assert name is None def lowerCamelCase_ ( self : Any ): '''simple docstring''' # TODO JP: Make TransfoXL XLA compliant pass @slow def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase : int =TFTransfoXLModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) @unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' ) def lowerCamelCase_ ( self : int ): '''simple docstring''' pass @require_tf class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @unittest.skip('''Skip test until #12651 is resolved.''' ) @slow def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowercase : Optional[Any] =TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' ) # fmt: off lowercase : Tuple =tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . 
# Rasputin has a vision and denounces one of the men as a horse thief . Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . <eod> </s> <eos> # fmt: off lowercase : Optional[int] =[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( # except for Alexei and Maria ) are discovered. The voice of young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. # 1883 Western Siberia, a young Grigori Rasputin is asked by his father # and a group of men to perform magic. Rasputin has a vision and # denounces one of the men as a horse thief. Although his father initially # slaps him for making such an accusation, Rasputin watches as the man # is chased outside and beaten. Twenty years later, Rasputin sees a vision # of the Virgin Mary, prompting him to become a priest. # Rasputin quickly becomes famous, with people, even a bishop, begging for # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar # Nicholas II and his family were discovered. The voice of <unk> young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos> lowercase : int =model.generate(UpperCAmelCase__ , max_length=200 , do_sample=UpperCAmelCase__ ) self.assertListEqual(output_ids[0].numpy().tolist() , UpperCAmelCase__ )
92
0
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _snake_case = logging.get_logger(__name__) class _lowerCAmelCase ( lowercase__ , lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] ="maskformer-swin" SCREAMING_SNAKE_CASE_ : Optional[int] ={ "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self : int , SCREAMING_SNAKE_CASE__ : Tuple=2_24 , SCREAMING_SNAKE_CASE__ : Optional[int]=4 , SCREAMING_SNAKE_CASE__ : List[Any]=3 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=96 , SCREAMING_SNAKE_CASE__ : int=[2, 2, 6, 2] , SCREAMING_SNAKE_CASE__ : str=[3, 6, 12, 24] , SCREAMING_SNAKE_CASE__ : Optional[Any]=7 , SCREAMING_SNAKE_CASE__ : List[str]=4.0 , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.0 , SCREAMING_SNAKE_CASE__ : int=0.0 , SCREAMING_SNAKE_CASE__ : Tuple=0.1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]="gelu" , SCREAMING_SNAKE_CASE__ : Tuple=False , SCREAMING_SNAKE_CASE__ : int=0.02 , SCREAMING_SNAKE_CASE__ : int=1e-5 , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , **SCREAMING_SNAKE_CASE__ : Optional[int] , ): """simple docstring""" super().__init__(**UpperCAmelCase__ ) UpperCamelCase = image_size UpperCamelCase = patch_size UpperCamelCase = num_channels UpperCamelCase = embed_dim UpperCamelCase = depths UpperCamelCase = len(UpperCAmelCase__ ) UpperCamelCase = num_heads UpperCamelCase = window_size UpperCamelCase = mlp_ratio UpperCamelCase = qkv_bias UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = drop_path_rate UpperCamelCase = hidden_act UpperCamelCase = use_absolute_embeddings UpperCamelCase = layer_norm_eps UpperCamelCase = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model UpperCamelCase = int(embed_dim * 2 ** (len(UpperCAmelCase__ ) - 1) ) UpperCamelCase = ['''stem'''] + [F'stage{idx}' for idx in range(1 , len(UpperCAmelCase__ ) + 1 )] UpperCamelCase = get_aligned_output_features_output_indices( out_features=UpperCAmelCase__ , out_indices=UpperCAmelCase__ , stage_names=self.stage_names )
282
'''simple docstring''' import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class __SCREAMING_SNAKE_CASE : def __init__( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str=13 , UpperCAmelCase__ : Tuple=7 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Optional[Any]=99 , UpperCAmelCase__ : Any=16 , UpperCAmelCase__ : Optional[Any]=36 , UpperCAmelCase__ : str=6 , UpperCAmelCase__ : Any=6 , UpperCAmelCase__ : Optional[int]=6 , UpperCAmelCase__ : Any=37 , UpperCAmelCase__ : List[str]="gelu" , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : List[str]=512 , UpperCAmelCase__ : List[str]=16 , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : Union[str, Any]=3 , UpperCAmelCase__ : List[Any]=4 , UpperCAmelCase__ : Tuple=None , ): '''simple docstring''' lowercase : str =parent lowercase : int =batch_size lowercase : Any =seq_length lowercase : int =is_training lowercase : str =use_input_mask lowercase : int =use_token_type_ids lowercase : Dict =use_labels lowercase : int =vocab_size lowercase : str =embedding_size lowercase : Union[str, Any] =hidden_size lowercase : Tuple =num_hidden_layers lowercase : Any =num_hidden_groups lowercase : Union[str, Any] =num_attention_heads lowercase : Any =intermediate_size lowercase : Tuple =hidden_act lowercase : Optional[int] =hidden_dropout_prob lowercase : Union[str, Any] =attention_probs_dropout_prob lowercase : List[Any] =max_position_embeddings lowercase : int =type_vocab_size lowercase : int =type_sequence_label_size lowercase : Any =initializer_range lowercase : List[Any] =num_labels lowercase : int =num_choices lowercase : Optional[int] =scope def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : int =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : Optional[int] =None if self.use_input_mask: lowercase : List[str] =random_attention_mask([self.batch_size, self.seq_length] ) lowercase : Dict =None if self.use_token_type_ids: lowercase : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase : Tuple =None lowercase : Any =None lowercase : Dict =None if self.use_labels: lowercase : int =ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase : str =ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase : Optional[Any] =ids_tensor([self.batch_size] , self.num_choices ) lowercase : Any =self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' return AlbertConfig( vocab_size=self.vocab_size , 
hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str ): '''simple docstring''' lowercase : int =AlbertModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : int =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) lowercase : Dict =model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) lowercase : int =model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple ): '''simple docstring''' lowercase : Tuple =AlbertForPreTraining(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : int =model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , sentence_order_label=UpperCAmelCase__ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int ): '''simple docstring''' lowercase : Tuple =AlbertForMaskedLM(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : str =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] ): '''simple docstring''' lowercase : List[str] =AlbertForQuestionAnswering(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : List[str] =model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : int , 
UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] ): '''simple docstring''' lowercase : Optional[Any] =self.num_labels lowercase : Any =AlbertForSequenceClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : Dict =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict ): '''simple docstring''' lowercase : List[Any] =self.num_labels lowercase : str =AlbertForTokenClassification(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : int =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] ): '''simple docstring''' lowercase : Optional[int] =self.num_choices lowercase : List[Any] =AlbertForMultipleChoice(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : List[Any] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : int =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : int =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : List[str] =model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : Union[str, Any] =self.prepare_config_and_inputs() ( ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ) : Dict =config_and_inputs lowercase : Optional[Any] ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ): lowerCamelCase_ = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) lowerCamelCase_ = ( { 'feature-extraction': AlbertModel, 'fill-mask': AlbertForMaskedLM, 'question-answering': AlbertForQuestionAnswering, 'text-classification': AlbertForSequenceClassification, 'token-classification': AlbertForTokenClassification, 'zero-shot': AlbertForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase_ = True def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int=False ): '''simple docstring''' lowercase : Optional[int] =super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ ) if return_labels: if model_class in get_values(UpperCAmelCase__ ): lowercase : Any =torch.zeros( 
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCAmelCase__ ) lowercase : Any =torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ ) return inputs_dict def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : Tuple =AlbertModelTester(self ) lowercase : Optional[Any] =ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37 ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowercase : Optional[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' lowercase : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' lowercase : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : str ): '''simple docstring''' lowercase : List[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : str =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' lowercase : int =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : Optional[int] =self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowercase : Tuple =type self.model_tester.create_and_check_model(*UpperCAmelCase__ ) @slow def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase : str =AlbertModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) @require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @slow def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowercase : int =AlbertModel.from_pretrained('''albert-base-v2''' ) lowercase : Optional[int] =torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) lowercase : Any =torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): lowercase : Any =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )[0] lowercase : int =torch.Size((1, 11, 768) ) self.assertEqual(output.shape , UpperCAmelCase__ ) lowercase : Union[str, Any] =torch.tensor( [[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCAmelCase__ , atol=1E-4 ) )
92
0
"""simple docstring""" import argparse import os from pathlib import Path from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params __magic_name__ = [ # replace left string with right string to get the relevant state_dict key (identical state dict to bart) ["""memory_attention""", """encoder_attn"""], ["""attention""", """attn"""], ["""/""", """."""], [""".LayerNorm.gamma""", """_layer_norm.weight"""], [""".LayerNorm.beta""", """_layer_norm.bias"""], ["""r.layer_""", """r.layers."""], ["""output_proj""", """out_proj"""], ["""ffn.dense_1.""", """fc2."""], ["""ffn.dense.""", """fc1."""], ["""ffn_layer_norm""", """final_layer_norm"""], ["""kernel""", """weight"""], ["""encoder_layer_norm.""", """encoder.layer_norm."""], ["""decoder_layer_norm.""", """decoder.layer_norm."""], ["""embeddings.weights""", """shared.weight"""], ] def _A ( __lowercase ): """simple docstring""" for pegasus_name, hf_name in PATTERNS: lowerCamelCase__ = k.replace(__lowercase , __lowercase ) return k def _A ( __lowercase , __lowercase ): """simple docstring""" lowerCamelCase__ = DEFAULTS.copy() cfg_kwargs.update(__lowercase ) lowerCamelCase__ = PegasusConfig(**__lowercase ) lowerCamelCase__ = PegasusForConditionalGeneration(__lowercase ) lowerCamelCase__ = torch_model.model.state_dict() lowerCamelCase__ = {} for k, v in tf_weights.items(): lowerCamelCase__ = rename_state_dict_key(__lowercase ) if new_k not in sd: raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" ) if "dense" in k or "proj" in new_k: lowerCamelCase__ = v.T lowerCamelCase__ = torch.tensor(__lowercase , dtype=sd[new_k].dtype ) assert v.shape == sd[new_k].shape, f"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}""" # make sure embedding.padding_idx is respected lowerCamelCase__ = torch.zeros_like(mapping["""shared.weight"""][cfg.pad_token_id + 1] ) lowerCamelCase__ = mapping['''shared.weight'''] lowerCamelCase__ = mapping['''shared.weight'''] lowerCamelCase__ = {k: torch.zeros_like(__lowercase ) for k, v in sd.items() if k.endswith("""bias""" ) and k not in mapping} mapping.update(**__lowercase ) lowerCamelCase__ = torch_model.model.load_state_dict(__lowercase , strict=__lowercase ) lowerCamelCase__ = [ k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight'''] ] assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}""" assert extra == [], f"""no matches found for the following tf keys {extra}""" return torch_model def _A ( __lowercase="./ckpt/aeslc/model.ckpt-32000" ): """simple docstring""" lowerCamelCase__ = tf.train.list_variables(__lowercase ) lowerCamelCase__ = {} lowerCamelCase__ = ['''Adafactor''', '''global_step'''] for name, shape in tqdm(__lowercase , desc="""converting tf checkpoint to dict""" ): lowerCamelCase__ = any(pat in name for pat in ignore_name ) if skip_key: continue lowerCamelCase__ = tf.train.load_variable(__lowercase , __lowercase ) lowerCamelCase__ = array return tf_weights def _A ( __lowercase , __lowercase ): """simple docstring""" lowerCamelCase__ = Path(__lowercase ).parent.name lowerCamelCase__ = task_specific_params[f"""summarization_{dataset}"""]['''max_position_embeddings'''] lowerCamelCase__ = PegasusTokenizer.from_pretrained("""sshleifer/pegasus""" , model_max_length=__lowercase ) 
assert tok.model_max_length == desired_max_model_length tok.save_pretrained(__lowercase ) # convert model lowerCamelCase__ = get_tf_weights_as_numpy(__lowercase ) lowerCamelCase__ = task_specific_params[f"""summarization_{dataset}"""] if dataset == "large": lowerCamelCase__ = task_specific_params lowerCamelCase__ = convert_pegasus(__lowercase , __lowercase ) torch_model.save_pretrained(__lowercase ) lowerCamelCase__ = torch_model.state_dict() sd.pop("""model.decoder.embed_positions.weight""" ) sd.pop("""model.encoder.embed_positions.weight""" ) torch.save(__lowercase , Path(__lowercase ) / """pytorch_model.bin""" ) if __name__ == "__main__": __magic_name__ = argparse.ArgumentParser() # Required parameters parser.add_argument("""tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""") parser.add_argument("""save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""") __magic_name__ = parser.parse_args() if args.save_dir is None: __magic_name__ = Path(args.tf_ckpt_path).parent.name __magic_name__ = os.path.join("""pegasus""", dataset) convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
129
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey


class NearestNeighbour:
    """Resize an image with nearest-neighbour interpolation."""

    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        # start from a white destination image
        self.output = np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255

    def process(self):
        # for every destination pixel, copy the nearest source pixel
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)


if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    imshow(
        f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
    )
    waitKey(0)
    destroyAllWindows()
92
0
import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME


DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    # rename the LM head weight key so it matches the expected format
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
290
from __future__ import annotations


def resistor_parallel(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors connected in parallel."""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors connected in series."""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r


if __name__ == "__main__":
    import doctest

    doctest.testmod()
92
0
import numpy as np

import datasets


_DESCRIPTION = """
Compute the Mahalanobis Distance

Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
"""

_CITATION = """\
@article{de2000mahalanobis,
  title={The mahalanobis distance},
  author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
  journal={Chemometrics and intelligent laboratory systems},
  volume={50},
  number={1},
  pages={1--18},
  year={2000},
  publisher={Elsevier}
}
"""

_KWARGS_DESCRIPTION = """
Args:
    X: List of datapoints to be compared with the `reference_distribution`.
    reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
    mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:

    >>> mahalanobis_metric = datasets.load_metric("mahalanobis")
    >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
    >>> print(results)
    {'mahalanobis': array([0.5])}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mahalanobis(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()

        return {"mahalanobis": mahal_dist}
286
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""", """self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""", """self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""", """self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""", """self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""", """self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""", """self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""", """self_attn.rotary_emb""": """encoder.embed_positions""", """self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""", """conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""", """conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""", """conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""", """conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""", """conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""", """ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""", """ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""", """ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""", """ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""", """ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""", """ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } UpperCamelCase_ = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", ] def _lowerCAmelCase ( __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple ) -> str: for attribute in key.split('''.''' ): lowercase : Tuple =getattr(__magic_name__ , __magic_name__ ) if weight_type is not None: lowercase : Optional[int] =getattr(__magic_name__ , __magic_name__ ).shape else: lowercase : List[Any] =hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'''Shape of hf {key + "." 
+ weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": lowercase : Any =value elif weight_type == "weight_g": lowercase : List[Any] =value elif weight_type == "weight_v": lowercase : Union[str, Any] =value elif weight_type == "bias": lowercase : Tuple =value elif weight_type == "running_mean": lowercase : Union[str, Any] =value elif weight_type == "running_var": lowercase : str =value elif weight_type == "num_batches_tracked": lowercase : Tuple =value elif weight_type == "inv_freq": lowercase : Optional[Any] =value else: lowercase : Tuple =value logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def _lowerCAmelCase ( __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : str ) -> Union[str, Any]: lowercase : Optional[int] =[] lowercase : Tuple =fairseq_model.state_dict() lowercase : List[Any] =hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): lowercase : Tuple =False if "conv_layers" in name: load_conv_layer( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , hf_model.config.feat_extract_norm == '''group''' , ) lowercase : List[Any] =True else: for key, mapped_key in MAPPING.items(): lowercase : Optional[int] ='''wav2vec2_conformer.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: lowercase : Union[str, Any] =True if "*" in mapped_key: lowercase : Optional[int] =name.split(__magic_name__ )[0].split('''.''' )[-2] lowercase : List[str] =mapped_key.replace('''*''' , __magic_name__ ) if "pos_bias_u" in name: lowercase : Optional[Any] =None elif "pos_bias_v" in name: lowercase : Union[str, Any] =None elif "weight_g" in name: lowercase : Any ='''weight_g''' elif "weight_v" in name: lowercase : Tuple ='''weight_v''' elif "bias" in name: lowercase : Optional[int] ='''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj lowercase : Optional[int] ='''weight''' elif "running_mean" in name: lowercase : Union[str, Any] ='''running_mean''' elif "inv_freq" in name: lowercase : Any ='''inv_freq''' elif "running_var" in name: lowercase : Tuple ='''running_var''' elif "num_batches_tracked" in name: lowercase : Dict ='''num_batches_tracked''' else: lowercase : str =None set_recursively(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) continue if not is_used: unused_weights.append(__magic_name__ ) logger.warning(f'''Unused weights: {unused_weights}''' ) def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Tuple ) -> int: lowercase : Optional[Any] =full_name.split('''conv_layers.''' )[-1] lowercase : Any =name.split('''.''' ) lowercase : List[str] =int(items[0] ) lowercase : Union[str, Any] =int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) lowercase : Union[str, Any] =value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( 
f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) lowercase : Optional[Any] =value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) lowercase : Optional[int] =value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) lowercase : str =value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(__magic_name__ ) @torch.no_grad() def _lowerCAmelCase ( __magic_name__ : List[str] , __magic_name__ : Optional[Any] , __magic_name__ : Tuple=None , __magic_name__ : Optional[Any]=None , __magic_name__ : Union[str, Any]=True ) -> Union[str, Any]: if config_path is not None: lowercase : Optional[Any] =WavaVecaConformerConfig.from_pretrained(__magic_name__ , hidden_act='''swish''' ) else: lowercase : Optional[int] =WavaVecaConformerConfig() if "rope" in checkpoint_path: lowercase : Dict ='''rotary''' if is_finetuned: if dict_path: lowercase : Optional[Any] =Dictionary.load(__magic_name__ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq lowercase : str =target_dict.pad_index lowercase : Union[str, Any] =target_dict.bos_index lowercase : Any =target_dict.eos_index lowercase : Tuple =len(target_dict.symbols ) lowercase : str =os.path.join(__magic_name__ , '''vocab.json''' ) if not os.path.isdir(__magic_name__ ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__magic_name__ ) ) return os.makedirs(__magic_name__ , exist_ok=__magic_name__ ) lowercase : Dict =target_dict.indices # fairseq has the <pad> and <s> switched lowercase : str =0 lowercase : List[Any] =1 with open(__magic_name__ , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(__magic_name__ , __magic_name__ ) lowercase : List[str] =WavaVecaCTCTokenizer( __magic_name__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__magic_name__ , ) lowercase : Optional[int] =True if config.feat_extract_norm == '''layer''' else False lowercase : str =WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__magic_name__ , return_attention_mask=__magic_name__ , ) lowercase : Tuple =WavaVecaProcessor(feature_extractor=__magic_name__ , tokenizer=__magic_name__ ) processor.save_pretrained(__magic_name__ ) lowercase : str =WavaVecaConformerForCTC(__magic_name__ ) else: lowercase : Tuple =WavaVecaConformerForPreTraining(__magic_name__ ) if is_finetuned: lowercase , lowercase , lowercase : Optional[int] =fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} 
) else: lowercase : Dict =argparse.Namespace(task='''audio_pretraining''' ) lowercase : Optional[int] =fairseq.tasks.setup_task(__magic_name__ ) lowercase , lowercase , lowercase : str =fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__magic_name__ ) lowercase : List[Any] =model[0].eval() recursively_load_weights(__magic_name__ , __magic_name__ , not is_finetuned ) hf_wavavec.save_pretrained(__magic_name__ ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) UpperCamelCase_ = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
92
0
import argparse from collections import OrderedDict from pathlib import Path import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision.transforms import functional as F from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection from transformers.utils import logging logging.set_verbosity_info() snake_case : Tuple = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) snake_case : Optional[Any] = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""") ) rename_keys.append( (f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight""")) rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias""")) rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight""")) rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias""")) rename_keys.append( (f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias""")) rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight""")) rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias""")) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""") ) rename_keys.append( (f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append( ( f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""", f"""decoder.layers.{i}.encoder_attn.out_proj.weight""", ) ) rename_keys.append( ( f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""", f"""decoder.layers.{i}.encoder_attn.out_proj.bias""", ) ) rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight""")) rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias""")) rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight""")) rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias""")) rename_keys.append( (f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias""")) rename_keys.append( (f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") ) rename_keys.append( (f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") ) 
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight""")) rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias""")) # convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ('input_proj.weight', 'input_projection.weight'), ('input_proj.bias', 'input_projection.bias'), ('query_embed.weight', 'query_position_embeddings.weight'), ('transformer.encoder.norm.weight', 'encoder.layernorm.weight'), ('transformer.encoder.norm.bias', 'encoder.layernorm.bias'), ('transformer.decoder.norm.weight', 'decoder.layernorm.weight'), ('transformer.decoder.norm.bias', 'decoder.layernorm.bias'), ('class_embed.weight', 'class_labels_classifier.weight'), ('class_embed.bias', 'class_labels_classifier.bias'), ('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'), ('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'), ('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'), ('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'), ('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'), ('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'), ] ) def SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ): """simple docstring""" _SCREAMING_SNAKE_CASE = state_dict.pop(UpperCAmelCase__ ) _SCREAMING_SNAKE_CASE = val def SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ): """simple docstring""" _SCREAMING_SNAKE_CASE = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: _SCREAMING_SNAKE_CASE = key.replace('backbone.0.body' ,'backbone.conv_encoder.model' ) _SCREAMING_SNAKE_CASE = value else: _SCREAMING_SNAKE_CASE = value return new_state_dict def SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ): """simple docstring""" _SCREAMING_SNAKE_CASE = '''''' # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) _SCREAMING_SNAKE_CASE = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' ) _SCREAMING_SNAKE_CASE = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict _SCREAMING_SNAKE_CASE = in_proj_weight[:256, :] _SCREAMING_SNAKE_CASE = in_proj_bias[:256] _SCREAMING_SNAKE_CASE = in_proj_weight[256:512, :] _SCREAMING_SNAKE_CASE = in_proj_bias[256:512] _SCREAMING_SNAKE_CASE = in_proj_weight[-256:, :] _SCREAMING_SNAKE_CASE = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention _SCREAMING_SNAKE_CASE = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' ) _SCREAMING_SNAKE_CASE = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict _SCREAMING_SNAKE_CASE = in_proj_weight[:256, :] _SCREAMING_SNAKE_CASE = in_proj_bias[:256] _SCREAMING_SNAKE_CASE = in_proj_weight[256:512, :] _SCREAMING_SNAKE_CASE = in_proj_bias[256:512] _SCREAMING_SNAKE_CASE = in_proj_weight[-256:, :] _SCREAMING_SNAKE_CASE = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention _SCREAMING_SNAKE_CASE = state_dict.pop( 
f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' ) _SCREAMING_SNAKE_CASE = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) of cross-attention to the state dict _SCREAMING_SNAKE_CASE = in_proj_weight_cross_attn[:256, :] _SCREAMING_SNAKE_CASE = in_proj_bias_cross_attn[:256] _SCREAMING_SNAKE_CASE = in_proj_weight_cross_attn[256:512, :] _SCREAMING_SNAKE_CASE = in_proj_bias_cross_attn[256:512] _SCREAMING_SNAKE_CASE = in_proj_weight_cross_attn[-256:, :] _SCREAMING_SNAKE_CASE = in_proj_bias_cross_attn[-256:] def SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ,UpperCAmelCase__ ): """simple docstring""" _SCREAMING_SNAKE_CASE = image.size _SCREAMING_SNAKE_CASE = max(UpperCAmelCase__ ,UpperCAmelCase__ ) _SCREAMING_SNAKE_CASE = 800 if '''detection''' in checkpoint_url else 1000 _SCREAMING_SNAKE_CASE = target_max_size / current_max_size _SCREAMING_SNAKE_CASE = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) ) return resized_image def SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ): """simple docstring""" _SCREAMING_SNAKE_CASE = F.to_tensor(UpperCAmelCase__ ) _SCREAMING_SNAKE_CASE = F.normalize(UpperCAmelCase__ ,mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] ,std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ) return image @torch.no_grad() def SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ): """simple docstring""" logger.info('Converting model...' ) # load original state dict _SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(UpperCAmelCase__ ,map_location='cpu' ) # rename keys for src, dest in rename_keys: rename_key(UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ) _SCREAMING_SNAKE_CASE = rename_backbone_keys(UpperCAmelCase__ ) # query, key and value matrices need special treatment read_in_q_k_v(UpperCAmelCase__ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them _SCREAMING_SNAKE_CASE = '''model.''' for key in state_dict.copy().keys(): if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ): _SCREAMING_SNAKE_CASE = state_dict.pop(UpperCAmelCase__ ) _SCREAMING_SNAKE_CASE = val # create HuggingFace model and load state dict _SCREAMING_SNAKE_CASE = TableTransformerConfig( backbone='resnet18' ,mask_loss_coefficient=1 ,dice_loss_coefficient=1 ,ce_loss_coefficient=1 ,bbox_loss_coefficient=5 ,giou_loss_coefficient=2 ,eos_coefficient=0.4 ,class_cost=1 ,bbox_cost=5 ,giou_cost=2 ,) if "detection" in checkpoint_url: _SCREAMING_SNAKE_CASE = 15 _SCREAMING_SNAKE_CASE = 2 _SCREAMING_SNAKE_CASE = {0: '''table''', 1: '''table rotated'''} _SCREAMING_SNAKE_CASE = idalabel _SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()} else: _SCREAMING_SNAKE_CASE = 125 _SCREAMING_SNAKE_CASE = 6 _SCREAMING_SNAKE_CASE = { 0: '''table''', 1: '''table column''', 2: '''table row''', 3: '''table column header''', 4: '''table projected row header''', 5: '''table spanning cell''', } _SCREAMING_SNAKE_CASE = idalabel _SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()} _SCREAMING_SNAKE_CASE = DetrImageProcessor( format='coco_detection' ,max_size=800 if 'detection' in checkpoint_url else 1000 ) _SCREAMING_SNAKE_CASE = TableTransformerForObjectDetection(UpperCAmelCase__ ) model.load_state_dict(UpperCAmelCase__ ) model.eval() # verify our conversion _SCREAMING_SNAKE_CASE = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png''' _SCREAMING_SNAKE_CASE 
= hf_hub_download(repo_id='nielsr/example-pdf' ,repo_type='dataset' ,filename=UpperCAmelCase__ ) _SCREAMING_SNAKE_CASE = Image.open(UpperCAmelCase__ ).convert('RGB' ) _SCREAMING_SNAKE_CASE = normalize(resize(UpperCAmelCase__ ,UpperCAmelCase__ ) ).unsqueeze(0 ) _SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ ) if "detection" in checkpoint_url: _SCREAMING_SNAKE_CASE = (1, 15, 3) _SCREAMING_SNAKE_CASE = torch.tensor( [[-6.7_8_9_7, -16.9985, 6.7_9_3_7], [-8.0_1_8_6, -22.2192, 6.9_6_7_7], [-7.3_1_1_7, -21.0708, 7.4_0_5_5]] ) _SCREAMING_SNAKE_CASE = torch.tensor([[0.4_8_6_7, 0.1_7_6_7, 0.6_7_3_2], [0.6_7_1_8, 0.4_4_7_9, 0.3_8_3_0], [0.4_7_1_6, 0.1_7_6_0, 0.6_3_6_4]] ) else: _SCREAMING_SNAKE_CASE = (1, 125, 7) _SCREAMING_SNAKE_CASE = torch.tensor( [[-18.1430, -8.3_2_1_4, 4.8_2_7_4], [-18.4685, -7.1_3_6_1, -4.2_6_6_7], [-26.3693, -9.3_4_2_9, -4.9_9_6_2]] ) _SCREAMING_SNAKE_CASE = torch.tensor([[0.4_9_8_3, 0.5_5_9_5, 0.9_4_4_0], [0.4_9_1_6, 0.6_3_1_5, 0.5_9_5_4], [0.6_1_0_8, 0.8_6_3_7, 0.1_1_3_5]] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, :3, :3] ,UpperCAmelCase__ ,atol=1e-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] ,UpperCAmelCase__ ,atol=1e-4 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(UpperCAmelCase__ ).mkdir(exist_ok=UpperCAmelCase__ ) model.save_pretrained(UpperCAmelCase__ ) image_processor.save_pretrained(UpperCAmelCase__ ) if push_to_hub: # Push model to HF hub logger.info('Pushing model to the hub...' ) _SCREAMING_SNAKE_CASE = ( '''microsoft/table-transformer-detection''' if '''detection''' in checkpoint_url else '''microsoft/table-transformer-structure-recognition''' ) model.push_to_hub(UpperCAmelCase__ ) image_processor.push_to_hub(UpperCAmelCase__ ) if __name__ == "__main__": snake_case : Union[str, Any] = argparse.ArgumentParser() parser.add_argument( '--checkpoint_url', default='https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth', type=str, choices=[ 'https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth', 'https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth', ], help='URL of the Table Transformer checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) snake_case : Tuple = parser.parse_args() convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
code_codestyle: 605
'''simple docstring''' import math import flax.linen as nn import jax.numpy as jnp def _lowerCAmelCase ( __magic_name__ : jnp.ndarray , __magic_name__ : int , __magic_name__ : float = 1 , __magic_name__ : float = 1 , __magic_name__ : float = 1.0E4 , __magic_name__ : bool = False , __magic_name__ : float = 1.0 , ) -> jnp.ndarray: assert timesteps.ndim == 1, "Timesteps should be a 1d-array" assert embedding_dim % 2 == 0, f'''Embedding dimension {embedding_dim} should be even''' lowercase : int =float(embedding_dim // 2 ) lowercase : Optional[int] =math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift) lowercase : Any =min_timescale * jnp.exp(jnp.arange(__magic_name__ , dtype=jnp.floataa ) * -log_timescale_increment ) lowercase : List[Any] =jnp.expand_dims(__magic_name__ , 1 ) * jnp.expand_dims(__magic_name__ , 0 ) # scale embeddings lowercase : Tuple =scale * emb if flip_sin_to_cos: lowercase : Dict =jnp.concatenate([jnp.cos(__magic_name__ ), jnp.sin(__magic_name__ )] , axis=1 ) else: lowercase : Any =jnp.concatenate([jnp.sin(__magic_name__ ), jnp.cos(__magic_name__ )] , axis=1 ) lowercase : List[str] =jnp.reshape(__magic_name__ , [jnp.shape(__magic_name__ )[0], embedding_dim] ) return signal class __SCREAMING_SNAKE_CASE ( nn.Module ): lowerCamelCase_ = 32 lowerCamelCase_ = jnp.floataa @nn.compact def __call__( self : Tuple , UpperCAmelCase__ : int ): '''simple docstring''' lowercase : List[Any] =nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_1''' )(UpperCAmelCase__ ) lowercase : Any =nn.silu(UpperCAmelCase__ ) lowercase : int =nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_2''' )(UpperCAmelCase__ ) return temb class __SCREAMING_SNAKE_CASE ( nn.Module ): lowerCamelCase_ = 32 lowerCamelCase_ = False lowerCamelCase_ = 1 @nn.compact def __call__( self : int , UpperCAmelCase__ : str ): '''simple docstring''' return get_sinusoidal_embeddings( UpperCAmelCase__ , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
style_context_codestyle: 92
label: 0
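The Table Transformer conversion script above is built around one repeated move: pop a tensor out of the original state dict and re-insert it under the name the Hugging Face implementation expects. A minimal, self-contained sketch of that renaming pattern follows; the keys and values are made up for illustration and do not come from a real checkpoint.

from collections import OrderedDict

def rename_key(state_dict, old, new):
    # Remove the entry stored under the original key and re-add it under the new key.
    state_dict[new] = state_dict.pop(old)

# Hypothetical miniature "checkpoint" standing in for the real state dict.
state_dict = OrderedDict({"transformer.encoder.norm.weight": [0.1, 0.2]})
rename_keys = [("transformer.encoder.norm.weight", "encoder.layernorm.weight")]

for src, dest in rename_keys:
    rename_key(state_dict, src, dest)

assert list(state_dict) == ["encoder.layernorm.weight"]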
"""simple docstring""" import torch from diffusers import DPMSolverSDEScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import require_torchsde from .test_schedulers import SchedulerCommonTest @require_torchsde class lowerCAmelCase__ ( lowercase__ ): lowercase__ : Dict = (DPMSolverSDEScheduler,) lowercase__ : List[Any] = 10 def lowercase_ ( self , **UpperCamelCase__ ): '''simple docstring''' A__ = { '''num_train_timesteps''': 11_00, '''beta_start''': 0.0001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''noise_sampler_seed''': 0, } config.update(**UpperCAmelCase__ ) return config def lowercase_ ( self ): '''simple docstring''' for timesteps in [10, 50, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=UpperCAmelCase__ ) def lowercase_ ( self ): '''simple docstring''' for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=UpperCAmelCase__ , beta_end=UpperCAmelCase__ ) def lowercase_ ( self ): '''simple docstring''' for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=UpperCAmelCase__ ) def lowercase_ ( self ): '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=UpperCAmelCase__ ) def lowercase_ ( self ): '''simple docstring''' A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config() A__ = scheduler_class(**UpperCAmelCase__ ) scheduler.set_timesteps(self.num_inference_steps ) A__ = self.dummy_model() A__ = self.dummy_sample_deter * scheduler.init_noise_sigma A__ = sample.to(UpperCAmelCase__ ) for i, t in enumerate(scheduler.timesteps ): A__ = scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ ) A__ = model(UpperCAmelCase__ , UpperCAmelCase__ ) A__ = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) A__ = output.prev_sample A__ = torch.sum(torch.abs(UpperCAmelCase__ ) ) A__ = torch.mean(torch.abs(UpperCAmelCase__ ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 1_67.47_82_10_44_92_18_75 ) < 1e-2 assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 1_71.59_35_21_11_81_64_06 ) < 1e-2 assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1e-3 else: assert abs(result_sum.item() - 1_62.52_38_34_22_85_15_62 ) < 1e-2 assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3 def lowercase_ ( self ): '''simple docstring''' A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config(prediction_type="v_prediction" ) A__ = scheduler_class(**UpperCAmelCase__ ) scheduler.set_timesteps(self.num_inference_steps ) A__ = self.dummy_model() A__ = self.dummy_sample_deter * scheduler.init_noise_sigma A__ = sample.to(UpperCAmelCase__ ) for i, t in enumerate(scheduler.timesteps ): A__ = scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ ) A__ = model(UpperCAmelCase__ , UpperCAmelCase__ ) A__ = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) A__ = output.prev_sample A__ = torch.sum(torch.abs(UpperCAmelCase__ ) ) A__ = torch.mean(torch.abs(UpperCAmelCase__ ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 1_24.77_14_92_00_43_94_53 ) < 1e-2 assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 1_28.1_66_33_60_59_57_03 ) < 1e-2 assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1e-3 else: assert abs(result_sum.item() - 
1_19.8_48_75_48_82_81_25 ) < 1e-2 assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1e-3 def lowercase_ ( self ): '''simple docstring''' A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config() A__ = scheduler_class(**UpperCAmelCase__ ) scheduler.set_timesteps(self.num_inference_steps , device=UpperCAmelCase__ ) A__ = self.dummy_model() A__ = self.dummy_sample_deter.to(UpperCAmelCase__ ) * scheduler.init_noise_sigma for t in scheduler.timesteps: A__ = scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ ) A__ = model(UpperCAmelCase__ , UpperCAmelCase__ ) A__ = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) A__ = output.prev_sample A__ = torch.sum(torch.abs(UpperCAmelCase__ ) ) A__ = torch.mean(torch.abs(UpperCAmelCase__ ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 1_67.46_95_73_97_46_09_38 ) < 1e-2 assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 1_71.59_35_36_37_69_53_12 ) < 1e-2 assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1e-3 else: assert abs(result_sum.item() - 1_62.52_38_34_22_85_15_62 ) < 1e-2 assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3 def lowercase_ ( self ): '''simple docstring''' A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config() A__ = scheduler_class(**UpperCAmelCase__ , use_karras_sigmas=UpperCAmelCase__ ) scheduler.set_timesteps(self.num_inference_steps , device=UpperCAmelCase__ ) A__ = self.dummy_model() A__ = self.dummy_sample_deter.to(UpperCAmelCase__ ) * scheduler.init_noise_sigma A__ = sample.to(UpperCAmelCase__ ) for t in scheduler.timesteps: A__ = scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ ) A__ = model(UpperCAmelCase__ , UpperCAmelCase__ ) A__ = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) A__ = output.prev_sample A__ = torch.sum(torch.abs(UpperCAmelCase__ ) ) A__ = torch.mean(torch.abs(UpperCAmelCase__ ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 1_76.66_97_41_35_74_21_88 ) < 1e-2 assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 1_77.63_65_35_64_45_31_25 ) < 1e-2 assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2 else: assert abs(result_sum.item() - 1_70.3_13_52_23_38_86_72 ) < 1e-2 assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
code_codestyle: 337
'''simple docstring''' from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) # TODO Update this UpperCamelCase_ = { """facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""", # See all ESM models at https://huggingface.co/models?filter=esm } class __SCREAMING_SNAKE_CASE ( lowercase__ ): lowerCamelCase_ = 'esm' def __init__( self : Optional[Any] , UpperCAmelCase__ : str=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : int=768 , UpperCAmelCase__ : Union[str, Any]=12 , UpperCAmelCase__ : List[Any]=12 , UpperCAmelCase__ : Optional[Any]=3072 , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : int=1026 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : Optional[int]=1E-12 , UpperCAmelCase__ : List[Any]="absolute" , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : str=False , UpperCAmelCase__ : int=False , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : List[str]=None , **UpperCAmelCase__ : int , ): '''simple docstring''' super().__init__(pad_token_id=UpperCAmelCase__ , mask_token_id=UpperCAmelCase__ , **UpperCAmelCase__ ) lowercase : Any =vocab_size lowercase : List[Any] =hidden_size lowercase : Any =num_hidden_layers lowercase : Optional[Any] =num_attention_heads lowercase : Tuple =intermediate_size lowercase : int =hidden_dropout_prob lowercase : Dict =attention_probs_dropout_prob lowercase : Optional[int] =max_position_embeddings lowercase : Union[str, Any] =initializer_range lowercase : Tuple =layer_norm_eps lowercase : Union[str, Any] =position_embedding_type lowercase : List[Any] =use_cache lowercase : Dict =emb_layer_norm_before lowercase : Optional[Any] =token_dropout lowercase : Union[str, Any] =is_folding_model if is_folding_model: if esmfold_config is None: logger.info('''No esmfold_config supplied for folding model, using default values.''' ) lowercase : Any =EsmFoldConfig() elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): lowercase : Optional[int] =EsmFoldConfig(**UpperCAmelCase__ ) lowercase : Union[str, Any] =esmfold_config if vocab_list is None: logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' ) lowercase : int =get_default_vocab_list() else: lowercase : Tuple =vocab_list else: lowercase : Union[str, Any] =None lowercase : Dict =None if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , UpperCAmelCase__ ): raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : Union[str, Any] =super().to_dict() if isinstance(self.esmfold_config , UpperCAmelCase__ ): lowercase : Optional[Any] =self.esmfold_config.to_dict() return output @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = None lowerCamelCase_ = True lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = 0 lowerCamelCase_ = True lowerCamelCase_ = False lowerCamelCase_ = 1_28 lowerCamelCase_ = None def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' if self.trunk is None: lowercase : str =TrunkConfig() elif isinstance(self.trunk , UpperCAmelCase__ ): lowercase : int =TrunkConfig(**self.trunk ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : str 
=asdict(self ) lowercase : Union[str, Any] =self.trunk.to_dict() return output @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = 48 lowerCamelCase_ = 10_24 lowerCamelCase_ = 1_28 lowerCamelCase_ = 32 lowerCamelCase_ = 32 lowerCamelCase_ = 32 lowerCamelCase_ = 0 lowerCamelCase_ = 0 lowerCamelCase_ = False lowerCamelCase_ = 4 lowerCamelCase_ = 1_28 lowerCamelCase_ = None def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' if self.structure_module is None: lowercase : Any =StructureModuleConfig() elif isinstance(self.structure_module , UpperCAmelCase__ ): lowercase : Union[str, Any] =StructureModuleConfig(**self.structure_module ) if self.max_recycles <= 0: raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' ) if self.sequence_state_dim % self.sequence_state_dim != 0: raise ValueError( '''`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got''' F''' {self.sequence_state_dim} and {self.sequence_state_dim}.''' ) if self.pairwise_state_dim % self.pairwise_state_dim != 0: raise ValueError( '''`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got''' F''' {self.pairwise_state_dim} and {self.pairwise_state_dim}.''' ) lowercase : str =self.sequence_state_dim // self.sequence_head_width lowercase : int =self.pairwise_state_dim // self.pairwise_head_width if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width: raise ValueError( '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got''' F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' ) if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width: raise ValueError( '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got''' F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' ) if self.pairwise_state_dim % 2 != 0: raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' ) if self.dropout >= 0.4: raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : List[Any] =asdict(self ) lowercase : Any =self.structure_module.to_dict() return output @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = 3_84 lowerCamelCase_ = 1_28 lowerCamelCase_ = 16 lowerCamelCase_ = 1_28 lowerCamelCase_ = 12 lowerCamelCase_ = 4 lowerCamelCase_ = 8 lowerCamelCase_ = 0.1 lowerCamelCase_ = 8 lowerCamelCase_ = 1 lowerCamelCase_ = 2 lowerCamelCase_ = 7 lowerCamelCase_ = 10 lowerCamelCase_ = 1E-8 lowerCamelCase_ = 1E5 def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return asdict(self ) def _lowerCAmelCase ( ) -> Optional[int]: return ( "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>", )
style_context_codestyle: 92
label: 0
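Each DPMSolverSDE scheduler test above runs the same sampling loop: scale the current sample for the model, predict, then let the scheduler take one step. Below is a stripped-down sketch of that loop under stated assumptions: it uses a stand-in callable instead of a trained denoiser, an illustrative tensor shape, and it assumes diffusers plus torchsde are installed.

import torch
from diffusers import DPMSolverSDEScheduler

scheduler = DPMSolverSDEScheduler(num_train_timesteps=1100, noise_sampler_seed=0)
scheduler.set_timesteps(10)

sample = torch.ones(1, 3, 8, 8) * scheduler.init_noise_sigma
dummy_model = lambda x, t: 0.1 * x  # stand-in for the denoising model

for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = dummy_model(model_input, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample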
"""simple docstring""" from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_torch_available(): import torch if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm __A = logging.get_logger(__name__) @dataclass class snake_case ( lowercase__ ): SCREAMING_SNAKE_CASE_ : Any = [ """no_inference""", """no_cuda""", """no_tpu""", """no_speed""", """no_memory""", """no_env_print""", """no_multi_process""", ] def __init__( self : Tuple , **UpperCamelCase__ : int)-> Any: '''simple docstring''' for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: __lowerCAmelCase: Optional[int] = deprecated_arg[3:] setattr(self , UpperCAmelCase__ , not kwargs.pop(UpperCAmelCase__)) logger.warning( f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or" f" {positive_arg}={kwargs[positive_arg]}") __lowerCAmelCase: int = kwargs.pop("torchscript" , self.torchscript) __lowerCAmelCase: List[str] = kwargs.pop("torch_xla_tpu_print_metrics" , self.torch_xla_tpu_print_metrics) __lowerCAmelCase: int = kwargs.pop("fp16_opt_level" , self.fpaa_opt_level) super().__init__(**UpperCAmelCase__) SCREAMING_SNAKE_CASE_ : List[str] = field(default=lowercase__, metadata={"""help""": """Trace the models using torchscript"""} ) SCREAMING_SNAKE_CASE_ : str = field(default=lowercase__, metadata={"""help""": """Print Xla/PyTorch tpu metrics"""} ) SCREAMING_SNAKE_CASE_ : Any = field( default="""O1""", metadata={ """help""": ( """For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. """ """See details at https://nvidia.github.io/apex/amp.html""" ) }, ) @cached_property def lowercase_ ( self : List[Any])-> int: '''simple docstring''' requires_backends(self , ["torch"]) logger.info("PyTorch: setting up devices") if not self.cuda: __lowerCAmelCase: List[Any] = torch.device("cpu") __lowerCAmelCase: int = 0 elif is_torch_tpu_available(): __lowerCAmelCase: Optional[Any] = xm.xla_device() __lowerCAmelCase: int = 0 else: __lowerCAmelCase: Optional[Any] = torch.device("cuda" if torch.cuda.is_available() else "cpu") __lowerCAmelCase: str = torch.cuda.device_count() return device, n_gpu @property def lowercase_ ( self : Tuple)-> Any: '''simple docstring''' return is_torch_tpu_available() and self.tpu @property def lowercase_ ( self : int)-> Union[str, Any]: '''simple docstring''' requires_backends(self , ["torch"]) # TODO(PVP): currently only single GPU is supported return torch.cuda.current_device() @property def lowercase_ ( self : int)-> Union[str, Any]: '''simple docstring''' requires_backends(self , ["torch"]) return self._setup_devices[0] @property def lowercase_ ( self : Union[str, Any])-> List[str]: '''simple docstring''' requires_backends(self , ["torch"]) return self._setup_devices[1] @property def lowercase_ ( self : Any)-> Dict: '''simple docstring''' return self.n_gpu > 0
code_codestyle: 346
'''simple docstring''' # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import doctest import sys import warnings from os.path import abspath, dirname, join import _pytest from transformers.testing_utils import HfDoctestModule, HfDocTestParser # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. UpperCamelCase_ = abspath(join(dirname(__file__), """src""")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action="""ignore""", category=FutureWarning) def _lowerCAmelCase ( __magic_name__ : int ) -> Tuple: config.addinivalue_line( '''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' ) config.addinivalue_line( '''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' ) config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' ) config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' ) config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' ) config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' ) def _lowerCAmelCase ( __magic_name__ : int ) -> Any: from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(__magic_name__ ) def _lowerCAmelCase ( __magic_name__ : Any ) -> Any: from transformers.testing_utils import pytest_terminal_summary_main lowercase : Optional[Any] =terminalreporter.config.getoption('''--make-reports''' ) if make_reports: pytest_terminal_summary_main(__magic_name__ , id=__magic_name__ ) def _lowerCAmelCase ( __magic_name__ : Tuple , __magic_name__ : Union[str, Any] ) -> List[str]: # If no tests are collected, pytest exists with code 5, which makes the CI fail. if exitstatus == 5: lowercase : Optional[int] =0 # Doctest custom flag to ignore output. UpperCamelCase_ = doctest.register_optionflag("""IGNORE_RESULT""") UpperCamelCase_ = doctest.OutputChecker class __SCREAMING_SNAKE_CASE ( lowercase__ ): def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] ): '''simple docstring''' if IGNORE_RESULT & optionflags: return True return OutputChecker.check_output(self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) UpperCamelCase_ = CustomOutputChecker UpperCamelCase_ = HfDoctestModule UpperCamelCase_ = HfDocTestParser
style_context_codestyle: 92
label: 0
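The benchmark-arguments class above translates legacy no_<flag> keywords into the positive attributes that replaced them. A small sketch of that remapping follows; the class name and flags are made up for illustration, not the real benchmark arguments.

class Args:
    deprecated_args = ["no_inference", "no_speed"]  # legacy negated flags

    def __init__(self, **kwargs):
        self.inference = True
        self.speed = True
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]  # drop the "no_" prefix
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))

args = Args(no_speed=True)
assert args.speed is False and args.inference is True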
import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class lowerCamelCase_ : '''simple docstring''' def __init__( self , snake_case_ , snake_case_=9_9 , snake_case_=1_3 , snake_case_=7 , snake_case_=9 , snake_case_=True , snake_case_=True , snake_case_=False , snake_case_=3_2 , snake_case_=5 , snake_case_=4 , snake_case_=3_7 , snake_case_=8 , snake_case_=0.1 , snake_case_=0.0_0_2 , snake_case_=1 , snake_case_=0 , snake_case_=0 , snake_case_=None , snake_case_=None , ) -> Optional[Any]: '''simple docstring''' __lowercase = parent __lowercase = batch_size __lowercase = encoder_seq_length __lowercase = decoder_seq_length # For common tests __lowercase = self.decoder_seq_length __lowercase = is_training __lowercase = use_attention_mask __lowercase = use_labels __lowercase = vocab_size __lowercase = hidden_size __lowercase = num_hidden_layers __lowercase = num_attention_heads __lowercase = d_ff __lowercase = relative_attention_num_buckets __lowercase = dropout_rate __lowercase = initializer_factor __lowercase = eos_token_id __lowercase = pad_token_id __lowercase = decoder_start_token_id __lowercase = None __lowercase = decoder_layers def A ( self ) -> List[str]: '''simple docstring''' return TaConfig.from_pretrained('''google/umt5-base''' ) def A ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , ) -> Optional[int]: '''simple docstring''' if attention_mask is None: __lowercase = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: __lowercase = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: __lowercase = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=UpperCAmelCase__ ) if decoder_head_mask is None: __lowercase = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=UpperCAmelCase__ ) if cross_attn_head_mask is None: __lowercase = torch.ones( config.num_decoder_layers , config.num_attention_heads , device=UpperCAmelCase__ ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def A ( self ) -> Any: '''simple docstring''' __lowercase = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size ) __lowercase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input __lowercase = input_ids.clamp(self.pad_token_id + 1 ) __lowercase = 
decoder_input_ids.clamp(self.pad_token_id + 1 ) __lowercase = self.get_config() __lowercase = config.num_attention_heads __lowercase = self.prepare_inputs_dict(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) return config, input_dict def A ( self ) -> Any: '''simple docstring''' __lowercase = self.prepare_config_and_inputs() return config, inputs_dict def A ( self ) -> List[str]: '''simple docstring''' return TaConfig( vocab_size=1_6_6 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def A ( self ) -> List[str]: '''simple docstring''' return TaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def A ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) -> Optional[Any]: '''simple docstring''' __lowercase = UMTaModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __lowercase = model( input_ids=UpperCAmelCase__ , decoder_input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , ) __lowercase = model(input_ids=UpperCAmelCase__ , decoder_input_ids=UpperCAmelCase__ ) __lowercase = result.last_hidden_state __lowercase = result.past_key_values __lowercase = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) ) self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) ) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(UpperCAmelCase__ ) , config.num_layers ) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0] ) , 4 ) def A ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) -> List[str]: '''simple docstring''' __lowercase = UMTaModel(config=UpperCAmelCase__ ).get_decoder().to(UpperCAmelCase__ ).eval() # first forward pass __lowercase = model(UpperCAmelCase__ , use_cache=UpperCAmelCase__ ) __lowercase = model(UpperCAmelCase__ ) __lowercase = model(UpperCAmelCase__ , use_cache=UpperCAmelCase__ ) self.parent.assertTrue(len(UpperCAmelCase__ ) == len(UpperCAmelCase__ ) ) self.parent.assertTrue(len(UpperCAmelCase__ ) == len(UpperCAmelCase__ ) + 1 ) __lowercase = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids __lowercase = ids_tensor((self.batch_size, 1) , config.vocab_size ) # append to next input_ids and __lowercase = torch.cat([input_ids, next_tokens] , dim=-1 
) __lowercase = model(UpperCAmelCase__ )['''last_hidden_state'''] __lowercase = model(UpperCAmelCase__ , past_key_values=UpperCAmelCase__ )['''last_hidden_state'''] # select random slice __lowercase = ids_tensor((1,) , output_from_past.shape[-1] ).item() __lowercase = output_from_no_past[:, -1, random_slice_idx].detach() __lowercase = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3 ) ) def A ( self , snake_case_ , snake_case_ , ) -> Dict: '''simple docstring''' __lowercase = UMTaModel(config=UpperCAmelCase__ ).to(UpperCAmelCase__ ).half().eval() __lowercase = model(**UpperCAmelCase__ )['''last_hidden_state'''] self.parent.assertFalse(torch.isnan(UpperCAmelCase__ ).any().item() ) @require_torch class lowerCamelCase_ ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ): '''simple docstring''' __UpperCAmelCase = ( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) __UpperCAmelCase = (UMTaForConditionalGeneration,) if is_torch_available() else () __UpperCAmelCase = ( { "conversational": UMTaForConditionalGeneration, "feature-extraction": UMTaModel, "summarization": UMTaForConditionalGeneration, "text2text-generation": UMTaForConditionalGeneration, "translation": UMTaForConditionalGeneration, "question-answering": UMTaForQuestionAnswering, } if is_torch_available() else {} ) __UpperCAmelCase = True __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = True __UpperCAmelCase = True # The small UMT5 model needs higher percentages for CPU/MP tests __UpperCAmelCase = [0.8, 0.9] def A ( self ) -> Dict: '''simple docstring''' __lowercase = UMTaModelTester(self ) @unittest.skip('''Test has a segmentation fault on torch 1.8.0''' ) def A ( self ) -> int: '''simple docstring''' __lowercase = self.model_tester.prepare_config_and_inputs() __lowercase = UMTaModel(config_and_inputs[0] ).to(UpperCAmelCase__ ) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( UpperCAmelCase__ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F'{tmpdirname}/t5_test.onnx' , export_params=UpperCAmelCase__ , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , ) @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' ) def A ( self ) -> Optional[int]: '''simple docstring''' __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*UpperCAmelCase__ ) def A ( self ) -> Union[str, Any]: '''simple docstring''' __lowercase = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions'''] __lowercase = self.model_tester.prepare_config_and_inputs() __lowercase = config_and_inputs[0] __lowercase = UMTaForConditionalGeneration(UpperCAmelCase__ ).eval() model.to(UpperCAmelCase__ ) __lowercase = { '''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=UpperCAmelCase__ ), '''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCAmelCase__ ), '''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCAmelCase__ ), } for attn_name, (name, mask) in zip(UpperCAmelCase__ , head_masking.items() ): __lowercase = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": __lowercase = torch.ones( config.num_decoder_layers , 
config.num_heads , device=UpperCAmelCase__ ) __lowercase = model.generate( config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=UpperCAmelCase__ , return_dict_in_generate=UpperCAmelCase__ , **UpperCAmelCase__ , ) # We check the state of decoder_attentions and cross_attentions just from the last step __lowercase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 ) @unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' ) def A ( self ) -> List[Any]: '''simple docstring''' pass @require_torch @require_sentencepiece @require_tokenizers class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @slow @unittest.skip( '''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' ) def A ( self ) -> Any: '''simple docstring''' __lowercase = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=UpperCAmelCase__ ).to(UpperCAmelCase__ ) __lowercase = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=UpperCAmelCase__ , legacy=UpperCAmelCase__ ) __lowercase = [ '''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''', '''No se como puedo <extra_id_0>.''', '''This is the reason why we <extra_id_0> them.''', '''The <extra_id_0> walks in <extra_id_1>, seats''', '''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''', ] __lowercase = tokenizer(UpperCAmelCase__ , return_tensors='''pt''' , padding=UpperCAmelCase__ ).input_ids # fmt: off __lowercase = torch.tensor( [ [ 3_8_5_3_0, 2_1_0_7_0_3, 2_5_6_2_9_9, 1_4_1_0, 2_5_6_2_9_8, 2_7_4, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 8_2_6, 3_2_1, 6_7_1, 2_5_9_2_2, 2_5_6_2_9_9, 2_7_4, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 1_4_6_0, 3_3_9, 3_1_2, 1_9_0_1_4, 1_0_6_2_0, 7_5_8, 2_5_6_2_9_9, 2_3_5_5,2_7_4, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 5_1_7, 2_5_6_2_9_9, 1_4_8_6_9, 2_8_1, 3_0_1, 2_5_6_2_9_8, 2_7_5, 1_1_9_9_8_3,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 3_2_0, 2_5_6_2_9_9, 1_4_8_6_9, 2_8_1, 2_2_3_4, 2_8_9, 2_2_7_5, 3_3_3,6_1_3_9_1, 2_8_9, 2_5_6_2_9_8, 5_4_3, 2_5_6_2_9_7, 1_6_8_7_1_4, 3_2_9, 2_5_6_2_9_6,2_7_4, 1], ] ) # fmt: on torch.testing.assert_allclose(UpperCAmelCase__ , UpperCAmelCase__ ) __lowercase = model.generate(input_ids.to(UpperCAmelCase__ ) ) __lowercase = [ '''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''', '''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. 
This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', ] __lowercase = tokenizer.batch_decode(UpperCAmelCase__ ) self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
code_codestyle: 639
'''simple docstring''' from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging UpperCamelCase_ = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE ( lowercase__ ): lowerCamelCase_ = ['pixel_values'] def __init__( self : List[Any] , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[int, float] = 1 / 255 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : int = 8 , **UpperCAmelCase__ : str , ): '''simple docstring''' super().__init__(**UpperCAmelCase__ ) lowercase : Union[str, Any] =do_rescale lowercase : List[Any] =rescale_factor lowercase : Tuple =do_pad lowercase : List[str] =pad_size def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : float , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Optional[int] ): '''simple docstring''' return rescale(UpperCAmelCase__ , scale=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ ) def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None ): '''simple docstring''' lowercase , lowercase : Union[str, Any] =get_image_size(UpperCAmelCase__ ) lowercase : Tuple =(old_height // size + 1) * size - old_height lowercase : Tuple =(old_width // size + 1) * size - old_width return pad(UpperCAmelCase__ , ((0, pad_height), (0, pad_width)) , mode='''symmetric''' , data_format=UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : ImageInput , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[float] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCAmelCase__ : Optional[Any] , ): '''simple docstring''' lowercase : int =do_rescale if do_rescale is not None else self.do_rescale lowercase : Tuple =rescale_factor if rescale_factor is not None else self.rescale_factor lowercase : int =do_pad if do_pad is not None else self.do_pad lowercase : List[Any] =pad_size if pad_size is not None else self.pad_size lowercase : Any =make_list_of_images(UpperCAmelCase__ ) if not valid_images(UpperCAmelCase__ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) # All transformations expect numpy arrays. lowercase : Dict =[to_numpy_array(UpperCAmelCase__ ) for image in images] if do_rescale: lowercase : Tuple =[self.rescale(image=UpperCAmelCase__ , scale=UpperCAmelCase__ ) for image in images] if do_pad: lowercase : Union[str, Any] =[self.pad(UpperCAmelCase__ , size=UpperCAmelCase__ ) for image in images] lowercase : Dict =[to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images] lowercase : Any ={'''pixel_values''': images} return BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ )
style_context_codestyle: 92
label: 0
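The image processor above pads each side up to the next multiple of pad_size with the expression (old // size + 1) * size - old. A tiny worked example of that arithmetic follows (plain Python, values made up); note that, as written in the source, a side that is already a multiple of size still receives one full extra block of padding.

def pad_amounts(old_height, old_width, size=8):
    pad_height = (old_height // size + 1) * size - old_height
    pad_width = (old_width // size + 1) * size - old_width
    return pad_height, pad_width

print(pad_amounts(30, 45))  # (2, 3): pads 30 -> 32 and 45 -> 48
print(pad_amounts(32, 45))  # (8, 3): an already-divisible side is still padded by a full block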
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_lxmert import LxmertTokenizer __SCREAMING_SNAKE_CASE = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} __SCREAMING_SNAKE_CASE = { 'vocab_file': { 'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt', }, 'tokenizer_file': { 'unc-nlp/lxmert-base-uncased': ( 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json' ), }, } __SCREAMING_SNAKE_CASE = { 'unc-nlp/lxmert-base-uncased': 512, } __SCREAMING_SNAKE_CASE = { 'unc-nlp/lxmert-base-uncased': {'do_lower_case': True}, } class lowerCAmelCase_ ( lowercase__ ): '''simple docstring''' _lowercase = VOCAB_FILES_NAMES _lowercase = PRETRAINED_VOCAB_FILES_MAP _lowercase = PRETRAINED_INIT_CONFIGURATION _lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase = LxmertTokenizer def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase="[UNK]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="[PAD]" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase , ): super().__init__( UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , do_lower_case=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , tokenize_chinese_chars=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ , **UpperCAmelCase__ , ) SCREAMING_SNAKE_CASE_ : Union[str, Any] =json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , UpperCAmelCase__ ) != do_lower_case or normalizer_state.get('strip_accents' , UpperCAmelCase__ ) != strip_accents or normalizer_state.get('handle_chinese_chars' , UpperCAmelCase__ ) != tokenize_chinese_chars ): SCREAMING_SNAKE_CASE_ : Union[str, Any] =getattr(UpperCAmelCase__ , normalizer_state.pop('type' ) ) SCREAMING_SNAKE_CASE_ : int =do_lower_case SCREAMING_SNAKE_CASE_ : Tuple =strip_accents SCREAMING_SNAKE_CASE_ : Any =tokenize_chinese_chars SCREAMING_SNAKE_CASE_ : Union[str, Any] =normalizer_class(**UpperCAmelCase__ ) SCREAMING_SNAKE_CASE_ : int =do_lower_case def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None ): SCREAMING_SNAKE_CASE_ : Union[str, Any] =[self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ): SCREAMING_SNAKE_CASE_ : List[Any] =[self.sep_token_id] SCREAMING_SNAKE_CASE_ : List[str] =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ): SCREAMING_SNAKE_CASE_ : List[Any] =self._tokenizer.model.save(UpperCAmelCase__ , name=UpperCAmelCase__ ) return tuple(UpperCAmelCase__ )
code_codestyle: 220
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase_ = {"""configuration_mbart""": ["""MBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MBartConfig""", """MBartOnnxConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ["""MBartTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ["""MBartTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ """MBART_PRETRAINED_MODEL_ARCHIVE_LIST""", """MBartForCausalLM""", """MBartForConditionalGeneration""", """MBartForQuestionAnswering""", """MBartForSequenceClassification""", """MBartModel""", """MBartPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ """TFMBartForConditionalGeneration""", """TFMBartModel""", """TFMBartPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ """FlaxMBartForConditionalGeneration""", """FlaxMBartForQuestionAnswering""", """FlaxMBartForSequenceClassification""", """FlaxMBartModel""", """FlaxMBartPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart import MBartTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart_fast import MBartTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mbart import ( MBART_PRETRAINED_MODEL_ARCHIVE_LIST, MBartForCausalLM, MBartForConditionalGeneration, MBartForQuestionAnswering, MBartForSequenceClassification, MBartModel, MBartPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mbart import ( FlaxMBartForConditionalGeneration, FlaxMBartForQuestionAnswering, FlaxMBartForSequenceClassification, FlaxMBartModel, FlaxMBartPreTrainedModel, ) else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
style_context_codestyle: 92
label: 0
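create_token_type_ids_from_sequences in the LXMERT fast tokenizer above assigns segment id 0 to "[CLS] A [SEP]" and segment id 1 to "B [SEP]". A minimal illustration of that layout follows, using made-up token ids.

cls_ids, sep_ids = [101], [102]  # hypothetical special-token ids
token_ids_a = [7, 8, 9]          # first sequence
token_ids_b = [21, 22]           # second sequence

token_type_ids = [0] * len(cls_ids + token_ids_a + sep_ids) + [1] * len(token_ids_b + sep_ids)
assert token_type_ids == [0, 0, 0, 0, 0, 1, 1, 1]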
def _snake_case (_snake_case : Optional[int]) -> Union[str, Any]: return [ { 0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3], 5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7], }, { 0: [6], 1: [9], 2: [4, 5], 3: [4], 4: [2, 3], 5: [2], 6: [0, 7], 7: [6], 8: [], 9: [1], }, { 0: [4], 1: [6], 2: [], 3: [5, 6, 7], 4: [0, 6], 5: [3, 8, 9], 6: [1, 3, 4, 7], 7: [3, 6, 8, 9], 8: [5, 7], 9: [5, 7], }, { 0: [1, 3], 1: [0, 2, 4], 2: [1, 3, 4], 3: [0, 2, 4], 4: [1, 2, 3], }, ][index] def _snake_case (_snake_case : dict[int, list[int]]) -> list[tuple[int, int]]: _lowercase =0 _lowercase =len(_snake_case) # No of vertices in graph _lowercase =[0] * n _lowercase =[False] * n def dfs(_snake_case : Optional[Any] , _snake_case : Dict , _snake_case : int , _snake_case : Optional[int]): _lowercase =True _lowercase =id_ id_ += 1 for to in graph[at]: if to == parent: pass elif not visited[to]: dfs(_snake_case , _snake_case , _snake_case , id_) _lowercase =min(low[at] , low[to]) if id_ <= low[to]: bridges.append((at, to) if at < to else (to, at)) else: # This edge is a back edge and cannot be a bridge _lowercase =min(low[at] , low[to]) _lowercase =[] for i in range(_snake_case): if not visited[i]: dfs(_snake_case , -1 , _snake_case , id_) return bridges if __name__ == "__main__": import doctest doctest.testmod()
code_codestyle: 181
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"""tokenization_byt5""": ["""ByT5Tokenizer"""]}

if TYPE_CHECKING:
    from .tokenization_byta import ByTaTokenizer
else:
    import sys

    UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
style_context_codestyle: 92
label: 0
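The graph routine above finds bridges (edges whose removal disconnects the graph) with a depth-first search over discovery times and low-links. Below is a readable sketch of the same idea in the standard disc/low formulation; it is a reimplementation for illustration rather than the exact variant in the snippet, followed by a small usage example on a path graph where every edge is a bridge.

def find_bridges(graph):
    n = len(graph)
    disc = [0] * n            # discovery time of each vertex
    low = [0] * n             # lowest discovery time reachable from the subtree
    visited = [False] * n
    bridges = []
    timer = [0]

    def dfs(at, parent):
        visited[at] = True
        disc[at] = low[at] = timer[0]
        timer[0] += 1
        for to in graph[at]:
            if to == parent:
                continue
            if visited[to]:
                low[at] = min(low[at], disc[to])      # back edge
            else:
                dfs(to, at)
                low[at] = min(low[at], low[to])
                if low[to] > disc[at]:                # no back edge reaches above `at`
                    bridges.append((min(at, to), max(at, to)))

    for v in range(n):
        if not visited[v]:
            dfs(v, -1)
    return bridges

print(sorted(find_bridges({0: [1], 1: [0, 2], 2: [1]})))  # [(0, 1), (1, 2)]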
"""simple docstring""" import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetImgaImgPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class a ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ): snake_case__ = StableDiffusionControlNetImgaImgPipeline snake_case__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''} snake_case__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS snake_case__ = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'''control_image'''} ) snake_case__ = IMAGE_TO_IMAGE_IMAGE_PARAMS def UpperCamelCase__ ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCAmelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) torch.manual_seed(0 ) lowerCAmelCase = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) torch.manual_seed(0 ) lowerCAmelCase = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=UpperCAmelCase__ , set_alpha_to_one=UpperCAmelCase__ , ) torch.manual_seed(0 ) lowerCAmelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0 ) lowerCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) lowerCAmelCase = CLIPTextModel(UpperCAmelCase__ ) lowerCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) lowerCAmelCase = { '''unet''': unet, '''controlnet''': controlnet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def UpperCamelCase__ ( self , _snake_case , _snake_case=0 ): """simple docstring""" if str(UpperCAmelCase__ ).startswith('mps' ): lowerCAmelCase = torch.manual_seed(UpperCAmelCase__ ) else: lowerCAmelCase = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ ) lowerCAmelCase = 2 lowerCAmelCase = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=UpperCAmelCase__ , device=torch.device(UpperCAmelCase__ ) , ) 
lowerCAmelCase = floats_tensor(control_image.shape , rng=random.Random(UpperCAmelCase__ ) ).to(UpperCAmelCase__ ) lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCAmelCase = Image.fromarray(np.uinta(UpperCAmelCase__ ) ).convert('RGB' ).resize((64, 64) ) lowerCAmelCase = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', '''image''': image, '''control_image''': control_image, } return inputs def UpperCamelCase__ ( self ): """simple docstring""" return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_inference_batch_single_identical(expected_max_diff=2E-3 ) class a ( lowercase__ , lowercase__ , unittest.TestCase ): snake_case__ = StableDiffusionControlNetImgaImgPipeline snake_case__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''} snake_case__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS snake_case__ = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def UpperCamelCase__ ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCAmelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) torch.manual_seed(0 ) def init_weights(_snake_case ): if isinstance(UpperCAmelCase__ , torch.nn.Convad ): torch.nn.init.normal(m.weight ) m.bias.data.fill_(1.0 ) lowerCAmelCase = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(UpperCAmelCase__ ) torch.manual_seed(0 ) lowerCAmelCase = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(UpperCAmelCase__ ) torch.manual_seed(0 ) lowerCAmelCase = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=UpperCAmelCase__ , set_alpha_to_one=UpperCAmelCase__ , ) torch.manual_seed(0 ) lowerCAmelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0 ) lowerCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) lowerCAmelCase = CLIPTextModel(UpperCAmelCase__ ) lowerCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) lowerCAmelCase = MultiControlNetModel([controlneta, controlneta] ) lowerCAmelCase = { '''unet''': unet, '''controlnet''': controlnet, '''scheduler''': scheduler, 
'''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def UpperCamelCase__ ( self , _snake_case , _snake_case=0 ): """simple docstring""" if str(UpperCAmelCase__ ).startswith('mps' ): lowerCAmelCase = torch.manual_seed(UpperCAmelCase__ ) else: lowerCAmelCase = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ ) lowerCAmelCase = 2 lowerCAmelCase = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=UpperCAmelCase__ , device=torch.device(UpperCAmelCase__ ) , ), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=UpperCAmelCase__ , device=torch.device(UpperCAmelCase__ ) , ), ] lowerCAmelCase = floats_tensor(control_image[0].shape , rng=random.Random(UpperCAmelCase__ ) ).to(UpperCAmelCase__ ) lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCAmelCase = Image.fromarray(np.uinta(UpperCAmelCase__ ) ).convert('RGB' ).resize((64, 64) ) lowerCAmelCase = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', '''image''': image, '''control_image''': control_image, } return inputs def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.get_dummy_components() lowerCAmelCase = self.pipeline_class(**UpperCAmelCase__ ) pipe.to(UpperCAmelCase__ ) lowerCAmelCase = 10.0 lowerCAmelCase = 4 lowerCAmelCase = self.get_dummy_inputs(UpperCAmelCase__ ) lowerCAmelCase = steps lowerCAmelCase = scale lowerCAmelCase = pipe(**UpperCAmelCase__ )[0] lowerCAmelCase = self.get_dummy_inputs(UpperCAmelCase__ ) lowerCAmelCase = steps lowerCAmelCase = scale lowerCAmelCase = pipe(**UpperCAmelCase__ , control_guidance_start=0.1 , control_guidance_end=0.2 )[0] lowerCAmelCase = self.get_dummy_inputs(UpperCAmelCase__ ) lowerCAmelCase = steps lowerCAmelCase = scale lowerCAmelCase = pipe(**UpperCAmelCase__ , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0] lowerCAmelCase = self.get_dummy_inputs(UpperCAmelCase__ ) lowerCAmelCase = steps lowerCAmelCase = scale lowerCAmelCase = pipe(**UpperCAmelCase__ , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0] # make sure that all outputs are different assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 def UpperCamelCase__ ( self ): """simple docstring""" return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_inference_batch_single_identical(expected_max_diff=2E-3 ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.get_dummy_components() lowerCAmelCase = self.pipeline_class(**UpperCAmelCase__ ) pipe.to(UpperCAmelCase__ ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(UpperCAmelCase__ ) except NotImplementedError: pass @slow 
@require_torch_gpu class a ( unittest.TestCase ): def UpperCamelCase__ ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-canny' ) lowerCAmelCase = StableDiffusionControlNetImgaImgPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' , safety_checker=UpperCAmelCase__ , controlnet=UpperCAmelCase__ ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) lowerCAmelCase = torch.Generator(device='cpu' ).manual_seed(0 ) lowerCAmelCase = '''evil space-punk bird''' lowerCAmelCase = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' ).resize((5_12, 5_12) ) lowerCAmelCase = load_image( 'https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png' ).resize((5_12, 5_12) ) lowerCAmelCase = pipe( UpperCAmelCase__ , UpperCAmelCase__ , control_image=UpperCAmelCase__ , generator=UpperCAmelCase__ , output_type='np' , num_inference_steps=50 , strength=0.6 , ) lowerCAmelCase = output.images[0] assert image.shape == (5_12, 5_12, 3) lowerCAmelCase = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy' ) assert np.abs(expected_image - image ).max() < 9E-2
4
'''simple docstring''' import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process UpperCamelCase_ = logging.getLogger(__name__) def _lowerCAmelCase ( __magic_name__ : List[Any] , __magic_name__ : str ) -> Union[str, Any]: return (preds == labels).mean() @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) lowerCamelCase_ = field( default=lowercase__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) lowerCamelCase_ = field( default=lowercase__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) lowerCamelCase_ = field( default=lowercase__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys() )} ) lowerCamelCase_ = field(metadata={'help': 'Should contain the data files for the task.'} ) lowerCamelCase_ = field( default=1_28 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) lowerCamelCase_ = field( default=lowercase__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) def _lowerCAmelCase ( ) -> int: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. lowercase : Dict =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) lowercase , lowercase , lowercase : List[Any] =parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. 
Use''' ''' --overwrite_output_dir to overcome.''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('''Training/evaluation parameters %s''' , __magic_name__ ) # Set seed set_seed(training_args.seed ) try: lowercase : Any =processors[data_args.task_name]() lowercase : Optional[int] =processor.get_labels() lowercase : str =len(__magic_name__ ) except KeyError: raise ValueError('''Task not found: %s''' % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. lowercase : List[str] =AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__magic_name__ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) lowercase : int =AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) lowercase : Any =AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__magic_name__ , cache_dir=model_args.cache_dir , ) # Get datasets lowercase : int =( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=__magic_name__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) lowercase : Union[str, Any] =( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=__magic_name__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(__magic_name__ : EvalPrediction ) -> Dict: lowercase : Dict =np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(__magic_name__ , p.label_ids )} # Data collator lowercase : List[str] =DataCollatorWithPadding(__magic_name__ , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer lowercase : Dict =Trainer( model=__magic_name__ , args=__magic_name__ , train_dataset=__magic_name__ , eval_dataset=__magic_name__ , compute_metrics=__magic_name__ , data_collator=__magic_name__ , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation lowercase : Optional[Any] ={} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) 
lowercase : List[Any] =trainer.evaluate() lowercase : Any =os.path.join(training_args.output_dir , '''eval_results.txt''' ) if trainer.is_world_master(): with open(__magic_name__ , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key, value in result.items(): logger.info(''' %s = %s''' , __magic_name__ , __magic_name__ ) writer.write('''%s = %s\n''' % (key, value) ) results.update(__magic_name__ ) return results def _lowerCAmelCase ( __magic_name__ : Any ) -> Optional[Any]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
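# Illustrative sketch (not part of the training script above): how HfArgumentParser maps
# dataclass fields to command-line flags, which is the pattern the script relies on. The
# ToyArguments dataclass is hypothetical and only mirrors the ModelArguments idea.
from dataclasses import dataclass, field

from transformers import HfArgumentParser


@dataclass
class ToyArguments:
    model_name_or_path: str = field(default="bert-base-uncased")
    max_seq_length: int = field(default=128)


if __name__ == "__main__":
    # e.g. `python toy.py --model_name_or_path roberta-base --max_seq_length 256`
    (toy_args,) = HfArgumentParser(ToyArguments).parse_args_into_dataclasses()
    print(toy_args.model_name_or_path, toy_args.max_seq_length)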
92
0
def solution(n: int = 600851475143) -> int:
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError('Parameter n must be int or castable to int.')
    if n <= 0:
        raise ValueError('Parameter n must be greater than or equal to one.')
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)


if __name__ == "__main__":
    print(F"{solution() = }")
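# Quick illustrative check (not from the original file) for the largest-prime-factor routine
# above: 13195 = 5 * 7 * 13 * 29, so its largest prime factor is 29.
if __name__ == "__main__":
    assert solution(13195) == 29
    assert solution(17) == 17  # a prime number is its own largest prime factor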
282
'''simple docstring''' import logging import os from dataclasses import dataclass, field from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import List, Optional import faiss import torch from datasets import Features, Sequence, Value, load_dataset from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser UpperCamelCase_ = logging.getLogger(__name__) torch.set_grad_enabled(False) UpperCamelCase_ = """cuda""" if torch.cuda.is_available() else """cpu""" def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : Tuple=100 , __magic_name__ : Optional[int]=" " ) -> List[str]: lowercase : List[Any] =text.split(__magic_name__ ) return [character.join(text[i : i + n] ).strip() for i in range(0 , len(__magic_name__ ) , __magic_name__ )] def _lowerCAmelCase ( __magic_name__ : dict ) -> dict: lowercase , lowercase : int =[], [] for title, text in zip(documents['''title'''] , documents['''text'''] ): if text is not None: for passage in split_text(__magic_name__ ): titles.append(title if title is not None else '''''' ) texts.append(__magic_name__ ) return {"title": titles, "text": texts} def _lowerCAmelCase ( __magic_name__ : dict , __magic_name__ : DPRContextEncoder , __magic_name__ : DPRContextEncoderTokenizerFast ) -> dict: lowercase : Dict =ctx_tokenizer( documents['''title'''] , documents['''text'''] , truncation=__magic_name__ , padding='''longest''' , return_tensors='''pt''' )['''input_ids'''] lowercase : Optional[int] =ctx_encoder(input_ids.to(device=__magic_name__ ) , return_dict=__magic_name__ ).pooler_output return {"embeddings": embeddings.detach().cpu().numpy()} def _lowerCAmelCase ( __magic_name__ : "RagExampleArguments" , __magic_name__ : "ProcessingArguments" , __magic_name__ : "IndexHnswArguments" , ) -> str: ###################################### logger.info('''Step 1 - Create the dataset''' ) ###################################### # The dataset needed for RAG must have three columns: # - title (string): title of the document # - text (string): text of a passage of the document # - embeddings (array of dimension d): DPR representation of the passage # Let's say you have documents in tab-separated csv files with columns "title" and "text" assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file" # You can load a Dataset object this way lowercase : Tuple =load_dataset( '''csv''' , data_files=[rag_example_args.csv_path] , split='''train''' , delimiter='''\t''' , column_names=['''title''', '''text'''] ) # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files # Then split the documents into passages of 100 words lowercase : Optional[int] =dataset.map(__magic_name__ , batched=__magic_name__ , num_proc=processing_args.num_proc ) # And compute the embeddings lowercase : Any =DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=__magic_name__ ) lowercase : Any =DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ) lowercase : Optional[int] =Features( {'''text''': Value('''string''' ), '''title''': Value('''string''' ), '''embeddings''': Sequence(Value('''float32''' ) )} ) # optional, save as float32 instead of float64 to save space lowercase : Optional[Any] =dataset.map( partial(__magic_name__ , ctx_encoder=__magic_name__ , ctx_tokenizer=__magic_name__ ) , batched=__magic_name__ , 
batch_size=processing_args.batch_size , features=__magic_name__ , ) # And finally save your dataset lowercase : Optional[Any] =os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset''' ) dataset.save_to_disk(__magic_name__ ) # from datasets import load_from_disk # dataset = load_from_disk(passages_path) # to reload the dataset ###################################### logger.info('''Step 2 - Index the dataset''' ) ###################################### # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search lowercase : Union[str, Any] =faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT ) dataset.add_faiss_index('''embeddings''' , custom_index=__magic_name__ ) # And save the index lowercase : Dict =os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset_hnsw_index.faiss''' ) dataset.get_index('''embeddings''' ).save(__magic_name__ ) # dataset.load_faiss_index("embeddings", index_path) # to reload the index @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = field( default=str(Path(lowercase__ ).parent / 'test_run' / 'dummy-kb' / 'my_knowledge_dataset.csv' ) , metadata={'help': 'Path to a tab-separated csv file with columns \'title\' and \'text\''} , ) lowerCamelCase_ = field( default=lowercase__ , metadata={'help': 'Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'} , ) lowerCamelCase_ = field( default='facebook/rag-sequence-nq' , metadata={'help': 'The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''} , ) lowerCamelCase_ = field( default='facebook/dpr-ctx_encoder-multiset-base' , metadata={ 'help': ( 'The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or' ' \'facebook/dpr-ctx_encoder-multiset-base\'' ) } , ) lowerCamelCase_ = field( default=str(Path(lowercase__ ).parent / 'test_run' / 'dummy-kb' ) , metadata={'help': 'Path to a directory where the dataset passages and the index will be saved'} , ) @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = field( default=lowercase__ , metadata={ 'help': 'The number of processes to use to split the documents into passages. Default is single process.' } , ) lowerCamelCase_ = field( default=16 , metadata={ 'help': 'The batch size to use when computing the passages embeddings using the DPR context encoder.' } , ) @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = field( default=7_68 , metadata={'help': 'The dimension of the embeddings to pass to the HNSW Faiss index.'} , ) lowerCamelCase_ = field( default=1_28 , metadata={ 'help': ( 'The number of bi-directional links created for every new element during the HNSW index construction.' ) } , ) if __name__ == "__main__": logging.basicConfig(level=logging.WARNING) logger.setLevel(logging.INFO) UpperCamelCase_ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments)) UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = parser.parse_args_into_dataclasses() with TemporaryDirectory() as tmp_dir: UpperCamelCase_ = rag_example_args.output_dir or tmp_dir main(rag_example_args, processing_args, index_hnsw_args)
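# Minimal illustrative sketch (not part of the original script) of the Faiss HNSW index built
# in Step 2 above: index a few random "passage embeddings" and query the nearest neighbours.
# The dimensions below are toy values chosen for the example, not the script's defaults.
import faiss
import numpy as np

d, m = 64, 128
xb = np.random.rand(1_000, d).astype("float32")
index = faiss.IndexHNSWFlat(d, m, faiss.METRIC_INNER_PRODUCT)
index.add(xb)
scores, ids = index.search(xb[:1], 5)  # 5 nearest neighbours of the first vector
print(ids)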
92
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __magic_name__ = {"""configuration_wavlm""": ["""WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WavLMConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = [ """WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST""", """WavLMForAudioFrameClassification""", """WavLMForCTC""", """WavLMForSequenceClassification""", """WavLMForXVector""", """WavLMModel""", """WavLMPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wavlm import ( WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST, WavLMForAudioFrameClassification, WavLMForCTC, WavLMForSequenceClassification, WavLMForXVector, WavLMModel, WavLMPreTrainedModel, ) else: import sys __magic_name__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
129
'''simple docstring''' import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import MaMaaaTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from transformers.utils import is_sentencepiece_available if is_sentencepiece_available(): from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin if is_sentencepiece_available(): UpperCamelCase_ = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right UpperCamelCase_ = 128022 UpperCamelCase_ = 128028 @require_sentencepiece class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ): lowerCamelCase_ = MaMaaaTokenizer lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = True def lowerCamelCase_ ( self : Dict ): '''simple docstring''' super().setUp() lowercase : Dict =['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>'''] lowercase : List[Any] =dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) ) lowercase : List[Any] =Path(self.tmpdirname ) save_json(UpperCAmelCase__ , save_dir / VOCAB_FILES_NAMES['''vocab_file'''] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(UpperCAmelCase__ , save_dir / VOCAB_FILES_NAMES['''spm_file'''] ) lowercase : Tuple =MaMaaaTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase_ ( self : Any , **UpperCAmelCase__ : int ): '''simple docstring''' return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Dict ): '''simple docstring''' return ( "This is a test", "This is a test", ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : Tuple ='''</s>''' lowercase : Union[str, Any] =0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' lowercase : List[Any] =self.get_tokenizer() lowercase : Optional[Any] =list(tokenizer.get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''</s>''' ) self.assertEqual(vocab_keys[1] , '''<unk>''' ) self.assertEqual(vocab_keys[-1] , '''<s>''' ) self.assertEqual(len(UpperCAmelCase__ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) ) @unittest.skip('''Skip this test while all models are still to be uploaded.''' ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' pass def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : Union[str, Any] =self.get_tokenizer() lowercase : str =tokenizer.tokenize('''This is a test''' ) self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [2, 3, 4, 5, 6] , ) lowercase : Optional[int] =tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] ) self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) lowercase : Tuple =tokenizer.convert_tokens_to_string(UpperCAmelCase__ ) self.assertEqual(UpperCAmelCase__ , '''This is a test''' ) @slow def lowerCamelCase_ ( self 
: List[str] ): '''simple docstring''' # fmt: off lowercase : int ={'''input_ids''': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase__ , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , ) @require_torch @require_sentencepiece @require_tokenizers class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): lowerCamelCase_ = 'facebook/m2m100_418M' lowerCamelCase_ = [ 'In my opinion, there are two levels of response from the French government.', 'NSA Affair Emphasizes Complete Lack of Debate on Intelligence', ] lowerCamelCase_ = [ 'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.', 'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement', ] # fmt: off lowerCamelCase_ = [EN_CODE, 5_93, 19_49, 11_57_81, 4, 7_15_86, 42_34, 6_06_33, 12_62_33, 4_32, 12_38_08, 1_55_92, 11_97, 11_71_32, 12_06_18, 5, 2] @classmethod def lowerCamelCase_ ( cls : Optional[Any] ): '''simple docstring''' lowercase : MaMaaaTokenizer =MaMaaaTokenizer.from_pretrained( cls.checkpoint_name , 
src_lang='''en''' , tgt_lang='''fr''' ) lowercase : Optional[int] =1 return cls def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' self.assertEqual(self.tokenizer.get_lang_id('''ar''' ) , 128006 ) self.assertEqual(self.tokenizer.get_lang_id('''en''' ) , 128022 ) self.assertEqual(self.tokenizer.get_lang_id('''ro''' ) , 128076 ) self.assertEqual(self.tokenizer.get_lang_id('''mr''' ) , 128063 ) def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : List[str] =self.tokenizer.get_vocab() self.assertEqual(len(UpperCAmelCase__ ) , self.tokenizer.vocab_size ) self.assertEqual(vocab['''<unk>'''] , 3 ) self.assertIn(self.tokenizer.get_lang_token('''en''' ) , UpperCAmelCase__ ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : List[Any] ='''en''' lowercase : Optional[Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , UpperCAmelCase__ ) def lowerCamelCase_ ( self : int ): '''simple docstring''' self.assertIn(UpperCAmelCase__ , self.tokenizer.all_special_ids ) # fmt: off lowercase : str =[FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2] # fmt: on lowercase : Optional[Any] =self.tokenizer.decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ ) lowercase : Optional[Any] =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCAmelCase__ ) self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertNotIn(self.tokenizer.eos_token , UpperCAmelCase__ ) def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : Any =tempfile.mkdtemp() lowercase : Tuple =self.tokenizer.lang_token_to_id self.tokenizer.save_pretrained(UpperCAmelCase__ ) lowercase : Union[str, Any] =MaMaaaTokenizer.from_pretrained(UpperCAmelCase__ ) self.assertDictEqual(new_tok.lang_token_to_id , UpperCAmelCase__ ) @require_torch def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' lowercase : List[str] ='''en''' lowercase : int ='''fr''' lowercase : Union[str, Any] =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase__ , return_tensors='''pt''' ) lowercase : str =shift_tokens_right( batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id ) for k in batch: lowercase : int =batch[k].tolist() # batch = {k: v.tolist() for k,v in batch.items()} # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 # batch.decoder_inputs_ids[0][0] == assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == FR_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2] == [2, FR_CODE] @require_torch def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : Optional[int] ='''mr''' self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) lowercase : Union[str, Any] ='''zh''' self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) @require_torch def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : int ='''mr''' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] ) self.assertListEqual(self.tokenizer.suffix_tokens , 
[self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) lowercase : Optional[Any] ='''zh''' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) @require_torch def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : Optional[Any] =self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''' ) self.assertEqual( nested_simplify(UpperCAmelCase__ ) , { # en_XX, A, test, EOS '''input_ids''': [[128022, 58, 4183, 2]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 128006, } , )
92
0
import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self) -> None:
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image) -> None:
        # read the image in grayscale and keep a copy of the original
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        # remap every pixel through the lookup table built above
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self) -> None:
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self) -> None:
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5_000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
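# Illustrative numpy-only sketch (hypothetical helper, not part of the class above) of the same
# histogram-equalization idea: accumulate the normalized histogram into a CDF and scale it by
# (L - 1) to obtain a per-intensity lookup table, as the loop over self.sk does above.
import numpy as np


def equalization_lut(img, levels=256):
    hist, _ = np.histogram(img.ravel(), bins=levels, range=(0, levels))
    cdf = hist.cumsum() / hist.sum()                        # running sum of p(r_k), like self.sk
    return np.round((levels - 1) * cdf).astype(np.uint8)    # s_k = (L - 1) * CDF, like last_list


# usage sketch: remapped = equalization_lut(gray_image)[gray_image]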
290
'''simple docstring'''


def solution(n: int = 600851475143) -> int:
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError('''Parameter n must be int or castable to int.''')
    if n <= 0:
        raise ValueError('''Parameter n must be greater than or equal to one.''')
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)


if __name__ == "__main__":
    print(f'''{solution() = }''')
92
0
'''simple docstring''' import numpy as np import torch from torch.utils.data import DataLoader from accelerate.utils.dataclasses import DistributedType class snake_case__ : def __init__( self : Dict , __a : Union[str, Any]=2 , __a : str=3 , __a : Optional[int]=64 , __a : Union[str, Any]=None ) -> str: '''simple docstring''' __snake_case : Optional[Any] = np.random.default_rng(UpperCAmelCase__ ) __snake_case : Union[str, Any] = length __snake_case : List[Any] = rng.normal(size=(length,) ).astype(np.floataa ) __snake_case : Dict = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa ) def __len__( self : Tuple ) -> Union[str, Any]: '''simple docstring''' return self.length def __getitem__( self : Union[str, Any] , __a : Any ) -> Dict: '''simple docstring''' return {"x": self.x[i], "y": self.y[i]} class snake_case__ ( torch.nn.Module ): def __init__( self : Union[str, Any] , __a : List[Any]=0 , __a : List[Any]=0 , __a : str=False ) -> int: '''simple docstring''' super().__init__() __snake_case : Any = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) __snake_case : List[str] = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) __snake_case : List[Any] = True def A_ ( self : str , __a : Any=None ) -> Tuple: '''simple docstring''' if self.first_batch: print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' ) __snake_case : Any = False return x * self.a[0] + self.b[0] class snake_case__ ( torch.nn.Module ): def __init__( self : List[Any] , __a : Optional[Any]=0 , __a : int=0 , __a : Optional[Any]=False ) -> Optional[int]: '''simple docstring''' super().__init__() __snake_case : Optional[int] = torch.nn.Parameter(torch.tensor(UpperCAmelCase__ ).float() ) __snake_case : int = torch.nn.Parameter(torch.tensor(UpperCAmelCase__ ).float() ) __snake_case : Tuple = True def A_ ( self : List[Any] , __a : Optional[Any]=None ) -> Optional[Any]: '''simple docstring''' if self.first_batch: print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' ) __snake_case : List[Any] = False return x * self.a + self.b def a_ ( _UpperCAmelCase : Tuple ,_UpperCAmelCase : int = 16 ) -> Union[str, Any]: from datasets import load_dataset from transformers import AutoTokenizer __snake_case : Dict = AutoTokenizer.from_pretrained('bert-base-cased' ) __snake_case : Optional[int] = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''} __snake_case : Dict = load_dataset('csv' ,data_files=_UpperCAmelCase ) __snake_case : int = datasets['''train'''].unique('label' ) __snake_case : List[str] = {v: i for i, v in enumerate(_UpperCAmelCase )} def tokenize_function(_UpperCAmelCase : Dict ): # max_length=None => use the model max length (it's actually the default) __snake_case : Dict = tokenizer( examples['sentence1'] ,examples['sentence2'] ,truncation=_UpperCAmelCase ,max_length=_UpperCAmelCase ,padding='max_length' ) if "label" in examples: __snake_case : List[Any] = [label_to_id[l] for l in examples['''label''']] return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __snake_case : Optional[int] = datasets.map( _UpperCAmelCase ,batched=_UpperCAmelCase ,remove_columns=['sentence1', 'sentence2', 'label'] ,) def collate_fn(_UpperCAmelCase : Any ): # On TPU it's best to pad everything to the same length or training will be very slow. 
if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(_UpperCAmelCase ,padding='max_length' ,max_length=1_28 ,return_tensors='pt' ) return tokenizer.pad(_UpperCAmelCase ,padding='longest' ,return_tensors='pt' ) # Instantiate dataloaders. __snake_case : Union[str, Any] = DataLoader(tokenized_datasets['train'] ,shuffle=_UpperCAmelCase ,collate_fn=_UpperCAmelCase ,batch_size=2 ) __snake_case : Tuple = DataLoader(tokenized_datasets['validation'] ,shuffle=_UpperCAmelCase ,collate_fn=_UpperCAmelCase ,batch_size=1 ) return train_dataloader, eval_dataloader
286
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json""" ), # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2 } class __SCREAMING_SNAKE_CASE ( lowercase__ ): lowerCamelCase_ = 'speech_to_text_2' lowerCamelCase_ = ['past_key_values'] lowerCamelCase_ = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self : int , UpperCAmelCase__ : Dict=10000 , UpperCAmelCase__ : Optional[int]=6 , UpperCAmelCase__ : str=2048 , UpperCAmelCase__ : Optional[Any]=4 , UpperCAmelCase__ : Union[str, Any]=0.0 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Optional[Any]="relu" , UpperCAmelCase__ : List[str]=256 , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : List[Any]=0.0 , UpperCAmelCase__ : Optional[Any]=0.0 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : int=2 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : str=1 , UpperCAmelCase__ : Optional[Any]=0 , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : List[Any]=1024 , **UpperCAmelCase__ : Dict , ): '''simple docstring''' lowercase : List[str] =vocab_size lowercase : Optional[int] =d_model lowercase : Optional[Any] =decoder_ffn_dim lowercase : Any =decoder_layers lowercase : Dict =decoder_attention_heads lowercase : List[Any] =dropout lowercase : List[Any] =attention_dropout lowercase : Any =activation_dropout lowercase : Optional[Any] =activation_function lowercase : Optional[int] =init_std lowercase : Dict =decoder_layerdrop lowercase : Optional[int] =use_cache lowercase : Optional[Any] =decoder_layers lowercase : List[str] =scale_embedding # scale factor will be sqrt(d_model) if True lowercase : str =max_target_positions super().__init__( pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , decoder_start_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , )
92
0
from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal snake_case : Union[str, Any] = logging.get_logger(__name__) snake_case : Any = TypeVar('DatasetType', Dataset, IterableDataset) def SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ,UpperCAmelCase__ = None ,UpperCAmelCase__ = None ,UpperCAmelCase__ = None ,UpperCAmelCase__ = None ,UpperCAmelCase__ = "first_exhausted" ,): """simple docstring""" from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError('Unable to interleave an empty list of datasets.' ) for i, dataset in enumerate(UpperCAmelCase__ ): if not isinstance(UpperCAmelCase__ ,(Dataset, IterableDataset) ): if isinstance(UpperCAmelCase__ ,(DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} ''' 'is an empty dataset dictionary.' ) raise ValueError( f'''Dataset at position {i} has at least one split: {list(UpperCAmelCase__ )}\n''' f'''Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(UpperCAmelCase__ ) )}\']''' ) raise ValueError( f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(UpperCAmelCase__ ).__name__}.''' ) if i == 0: _SCREAMING_SNAKE_CASE = ( (Dataset, IterableDataset) if isinstance(UpperCAmelCase__ ,UpperCAmelCase__ ) else (IterableDataset, Dataset) ) elif not isinstance(UpperCAmelCase__ ,UpperCAmelCase__ ): raise ValueError( f'''Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.''' ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(f'''{stopping_strategy} is not supported. Please enter a valid stopping_strategy.''' ) if dataset_type is Dataset: return _interleave_map_style_datasets( UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ,info=UpperCAmelCase__ ,split=UpperCAmelCase__ ,stopping_strategy=UpperCAmelCase__ ) else: return _interleave_iterable_datasets( UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ,info=UpperCAmelCase__ ,split=UpperCAmelCase__ ,stopping_strategy=UpperCAmelCase__ ) def SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ,UpperCAmelCase__ = None ,UpperCAmelCase__ = None ,UpperCAmelCase__ = 0 ,): """simple docstring""" if not dsets: raise ValueError('Unable to concatenate an empty list of datasets.' ) for i, dataset in enumerate(UpperCAmelCase__ ): if not isinstance(UpperCAmelCase__ ,(Dataset, IterableDataset) ): if isinstance(UpperCAmelCase__ ,(DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} ''' 'is an empty dataset dictionary.' 
) raise ValueError( f'''Dataset at position {i} has at least one split: {list(UpperCAmelCase__ )}\n''' f'''Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(UpperCAmelCase__ ) )}\']''' ) raise ValueError( f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(UpperCAmelCase__ ).__name__}.''' ) if i == 0: _SCREAMING_SNAKE_CASE = ( (Dataset, IterableDataset) if isinstance(UpperCAmelCase__ ,UpperCAmelCase__ ) else (IterableDataset, Dataset) ) elif not isinstance(UpperCAmelCase__ ,UpperCAmelCase__ ): raise ValueError( f'''Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.''' ) if dataset_type is Dataset: return _concatenate_map_style_datasets(UpperCAmelCase__ ,info=UpperCAmelCase__ ,split=UpperCAmelCase__ ,axis=UpperCAmelCase__ ) else: return _concatenate_iterable_datasets(UpperCAmelCase__ ,info=UpperCAmelCase__ ,split=UpperCAmelCase__ ,axis=UpperCAmelCase__ )
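# Illustrative usage sketch (not part of the library module above): interleave and concatenate
# two tiny in-memory datasets with the public helpers that wrap these functions.
from datasets import Dataset, concatenate_datasets, interleave_datasets

d1 = Dataset.from_dict({"text": ["a", "b", "c"]})
d2 = Dataset.from_dict({"text": ["x", "y"]})

mixed = interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=0, stopping_strategy="first_exhausted")
joined = concatenate_datasets([d1, d2])
print(mixed["text"], joined["text"])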
605
'''simple docstring''' import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __SCREAMING_SNAKE_CASE : def __init__( self : str , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str]=13 , UpperCAmelCase__ : Any=32 , UpperCAmelCase__ : List[str]=3 , UpperCAmelCase__ : Any=4 , UpperCAmelCase__ : List[str]=[10, 20, 30, 40] , UpperCAmelCase__ : Any=[2, 2, 3, 2] , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Optional[Any]=37 , UpperCAmelCase__ : Union[str, Any]="gelu" , UpperCAmelCase__ : Optional[Any]=10 , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : Optional[int]=["stage2", "stage3", "stage4"] , UpperCAmelCase__ : Dict=[2, 3, 4] , UpperCAmelCase__ : Optional[int]=None , ): '''simple docstring''' lowercase : List[Any] =parent lowercase : Tuple =batch_size lowercase : List[str] =image_size lowercase : List[Any] =num_channels lowercase : Union[str, Any] =num_stages lowercase : int =hidden_sizes lowercase : Any =depths lowercase : Tuple =is_training lowercase : str =use_labels lowercase : List[Any] =intermediate_size lowercase : int =hidden_act lowercase : Union[str, Any] =num_labels lowercase : Optional[int] =initializer_range lowercase : int =out_features lowercase : List[str] =out_indices lowercase : str =scope def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : str =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase : Dict =None if self.use_labels: lowercase : List[Any] =ids_tensor([self.batch_size] , self.num_labels ) lowercase : Dict =self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self : Any ): '''simple docstring''' return ConvNextVaConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[Any] ): '''simple docstring''' lowercase : Dict =ConvNextVaModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : Optional[Any] =model(UpperCAmelCase__ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int ): 
'''simple docstring''' lowercase : Dict =ConvNextVaForImageClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : str =model(UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int ): '''simple docstring''' lowercase : Union[str, Any] =ConvNextVaBackbone(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : Optional[int] =model(UpperCAmelCase__ ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None lowercase : Optional[Any] =None lowercase : str =ConvNextVaBackbone(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : Optional[Any] =model(UpperCAmelCase__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : Any =self.prepare_config_and_inputs() lowercase , lowercase , lowercase : str =config_and_inputs lowercase : Any ={'''pixel_values''': pixel_values} return config, inputs_dict def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : str =self.prepare_config_and_inputs() lowercase , lowercase , lowercase : List[str] =config_and_inputs lowercase : Optional[Any] ={'''pixel_values''': pixel_values, '''labels''': labels} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ): lowerCamelCase_ = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) lowerCamelCase_ = ( {'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification} if is_torch_available() else {} ) lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' lowercase : Dict =ConvNextVaModelTester(self ) lowercase : str =ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37 ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase_ ( self : Any ): '''simple docstring''' return @unittest.skip(reason='''ConvNextV2 does not use inputs_embeds''' ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' pass 
@unittest.skip(reason='''ConvNextV2 does not support input and output embeddings''' ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' pass @unittest.skip(reason='''ConvNextV2 does not use feedforward chunking''' ) def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' pass def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' if not self.model_tester.is_training: return for model_class in self.all_model_classes: lowercase , lowercase : Union[str, Any] =self.model_tester.prepare_config_and_inputs_with_labels() lowercase : Optional[int] =True if model_class.__name__ in [ *get_values(UpperCAmelCase__ ), *get_values(UpperCAmelCase__ ), ]: continue lowercase : Dict =model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.train() lowercase : Optional[Any] =self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ ) lowercase : List[Any] =model(**UpperCAmelCase__ ).loss loss.backward() def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' if not self.model_tester.is_training: return for model_class in self.all_model_classes: lowercase , lowercase : Any =self.model_tester.prepare_config_and_inputs_with_labels() lowercase : List[Any] =False lowercase : Any =True if ( model_class.__name__ in [*get_values(UpperCAmelCase__ ), *get_values(UpperCAmelCase__ )] or not model_class.supports_gradient_checkpointing ): continue lowercase : Any =model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.gradient_checkpointing_enable() model.train() lowercase : Optional[Any] =self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ ) lowercase : int =model(**UpperCAmelCase__ ).loss loss.backward() def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowercase , lowercase : int =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase : Dict =model_class(UpperCAmelCase__ ) lowercase : Union[str, Any] =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase : int =[*signature.parameters.keys()] lowercase : Optional[Any] =['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCAmelCase__ ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' lowercase : Tuple =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : int ): '''simple docstring''' def check_hidden_states_output(UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple ): lowercase : int =model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() with torch.no_grad(): lowercase : Any =model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) ) lowercase : Dict =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowercase : List[Any] =self.model_tester.num_stages self.assertEqual(len(UpperCAmelCase__ ) , expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowercase , lowercase : List[str] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase : List[str] =True check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) # check that 
output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase : Tuple =True check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' lowercase : str =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ ) @slow def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase : List[Any] =ConvNextVaModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) def _lowerCAmelCase ( ) -> List[Any]: lowercase : Union[str, Any] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' return AutoImageProcessor.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ) if is_vision_available() else None @slow def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' lowercase : Tuple =ConvNextVaForImageClassification.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ).to(UpperCAmelCase__ ) lowercase : int =self.default_image_processor lowercase : List[str] =prepare_img() lowercase : List[Any] =preprocessor(images=UpperCAmelCase__ , return_tensors='''pt''' ).to(UpperCAmelCase__ ) # forward pass with torch.no_grad(): lowercase : Dict =model(**UpperCAmelCase__ ) # verify the logits lowercase : Optional[Any] =torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , UpperCAmelCase__ ) lowercase : Tuple =torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(UpperCAmelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1E-4 ) )
92
0
"""simple docstring""" def __a ( A = 4_000_000 ) -> int: '''simple docstring''' A__ = [] A__ = 0, 1 while b <= n: if b % 2 == 0: even_fibs.append(A ) A__ = b, a + b return sum(A ) if __name__ == "__main__": print(F'''{solution() = }''')
337
'''simple docstring''' import re from flax.core.frozen_dict import freeze from flax.traverse_util import flatten_dict, unflatten_dict from jax.experimental import PartitionSpec as P # Sentinels UpperCamelCase_ = object() # For specifying empty leaf dict `{}` UpperCamelCase_ = object() def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : int ) -> Optional[int]: lowercase : Optional[Any] =tuple((re.compile(x + '''$''' ) for x in qs) ) for i in range(len(__magic_name__ ) - len(__magic_name__ ) + 1 ): lowercase : Union[str, Any] =[x.match(__magic_name__ ) for x, y in zip(__magic_name__ , ks[i:] )] if matches and all(__magic_name__ ): return True return False def _lowerCAmelCase ( __magic_name__ : Dict ) -> List[str]: def replace(__magic_name__ : Optional[int] , __magic_name__ : Optional[Any] ): for rule, replacement in rules: if _match(__magic_name__ , __magic_name__ ): return replacement return val return replace def _lowerCAmelCase ( ) -> int: return [ # embeddings (("transformer", "wpe", "embedding"), P('''mp''' , __magic_name__ )), (("transformer", "wte", "embedding"), P('''mp''' , __magic_name__ )), # atention (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__magic_name__ , '''mp''' )), (("attention", "out_proj", "kernel"), P('''mp''' , __magic_name__ )), (("attention", "out_proj", "bias"), None), # mlp (("mlp", "c_fc", "kernel"), P(__magic_name__ , '''mp''' )), (("mlp", "c_fc", "bias"), P('''mp''' )), (("mlp", "c_proj", "kernel"), P('''mp''' , __magic_name__ )), (("mlp", "c_proj", "bias"), None), # layer norms ((r"ln_\d+", "bias"), None), ((r"\d+", r"ln_\d+", "scale"), None), (("ln_f", "bias"), None), (("ln_f", "scale"), None), ] def _lowerCAmelCase ( __magic_name__ : str ) -> int: lowercase : int =_get_partition_rules() lowercase : Tuple =_replacement_rules(__magic_name__ ) lowercase : Any ={k: _unmatched for k in flatten_dict(__magic_name__ )} lowercase : Any ={k: replace(__magic_name__ , __magic_name__ ) for k, v in initd.items()} assert _unmatched not in result.values(), "Incomplete partition spec." return freeze(unflatten_dict(__magic_name__ ) )
92
0
"""simple docstring""" import requests from bsa import BeautifulSoup def a__ ( __SCREAMING_SNAKE_CASE = "https://www.worldometers.info/coronavirus" ) -> dict: __lowerCAmelCase: List[Any] = BeautifulSoup(requests.get(__SCREAMING_SNAKE_CASE ).text , "html.parser" ) __lowerCAmelCase: Tuple = soup.findAll("h1" ) __lowerCAmelCase: int = soup.findAll("div" , {"class": "maincounter-number"} ) keys += soup.findAll("span" , {"class": "panel-title"} ) values += soup.findAll("div" , {"class": "number-table-main"} ) return {key.text.strip(): value.text.strip() for key, value in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )} if __name__ == "__main__": print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n") for key, value in world_covidaa_stats().items(): print(F'''{key}\n{value}\n''')
346
"""Even Tree problem: find the maximum number of edges that can be removed from a
tree so that every remaining connected component has an even number of nodes."""
from collections import defaultdict


def dfs(start: int) -> int:
    """Return the size of the subtree rooted at ``start`` and record even-sized cuts."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    """Run the depth-first search from the root (node 1)."""
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited = {}
    cuts = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
92
0
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( BertTokenizer, ViltConfig, ViltForImageAndTextRetrieval, ViltForImagesAndTextClassification, ViltForMaskedLM, ViltForQuestionAnswering, ViltImageProcessor, ViltProcessor, ) from transformers.utils import logging logging.set_verbosity_info() a : List[str] = logging.get_logger(__name__) def lowercase_ ( _UpperCamelCase , _UpperCamelCase=False , _UpperCamelCase=False , _UpperCamelCase=False ): '''simple docstring''' __lowercase = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'transformer.blocks.{i}.norm1.weight', F'vilt.encoder.layer.{i}.layernorm_before.weight') ) rename_keys.append((F'transformer.blocks.{i}.norm1.bias', F'vilt.encoder.layer.{i}.layernorm_before.bias') ) rename_keys.append( (F'transformer.blocks.{i}.attn.proj.weight', F'vilt.encoder.layer.{i}.attention.output.dense.weight') ) rename_keys.append( (F'transformer.blocks.{i}.attn.proj.bias', F'vilt.encoder.layer.{i}.attention.output.dense.bias') ) rename_keys.append((F'transformer.blocks.{i}.norm2.weight', F'vilt.encoder.layer.{i}.layernorm_after.weight') ) rename_keys.append((F'transformer.blocks.{i}.norm2.bias', F'vilt.encoder.layer.{i}.layernorm_after.bias') ) rename_keys.append( (F'transformer.blocks.{i}.mlp.fc1.weight', F'vilt.encoder.layer.{i}.intermediate.dense.weight') ) rename_keys.append((F'transformer.blocks.{i}.mlp.fc1.bias', F'vilt.encoder.layer.{i}.intermediate.dense.bias') ) rename_keys.append((F'transformer.blocks.{i}.mlp.fc2.weight', F'vilt.encoder.layer.{i}.output.dense.weight') ) rename_keys.append((F'transformer.blocks.{i}.mlp.fc2.bias', F'vilt.encoder.layer.{i}.output.dense.bias') ) # embeddings rename_keys.extend( [ # text embeddings ('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''), ( '''text_embeddings.position_embeddings.weight''', '''vilt.embeddings.text_embeddings.position_embeddings.weight''', ), ('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''), ( '''text_embeddings.token_type_embeddings.weight''', '''vilt.embeddings.text_embeddings.token_type_embeddings.weight''', ), ('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''), ('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''), # patch embeddings ('''transformer.cls_token''', '''vilt.embeddings.cls_token'''), ('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''), ('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''), ('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''), # token type embeddings ('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''), ] ) # final layernorm + pooler rename_keys.extend( [ ('''transformer.norm.weight''', '''vilt.layernorm.weight'''), ('''transformer.norm.bias''', '''vilt.layernorm.bias'''), ('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''), ('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''), ] ) # classifier head(s) if vqa_model: # classification head rename_keys.extend( [ ('''vqa_classifier.0.weight''', '''classifier.0.weight'''), ('''vqa_classifier.0.bias''', '''classifier.0.bias'''), 
('''vqa_classifier.1.weight''', '''classifier.1.weight'''), ('''vqa_classifier.1.bias''', '''classifier.1.bias'''), ('''vqa_classifier.3.weight''', '''classifier.3.weight'''), ('''vqa_classifier.3.bias''', '''classifier.3.bias'''), ] ) elif nlvr_model: # classification head rename_keys.extend( [ ('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''), ('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''), ('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''), ('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''), ('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''), ('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''), ] ) else: pass return rename_keys def lowercase_ ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' for i in range(config.num_hidden_layers ): __lowercase = '''vilt.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) __lowercase = state_dict.pop(F'transformer.blocks.{i}.attn.qkv.weight' ) __lowercase = state_dict.pop(F'transformer.blocks.{i}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict __lowercase = in_proj_weight[ : config.hidden_size, : ] __lowercase = in_proj_bias[: config.hidden_size] __lowercase = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] __lowercase = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] __lowercase = in_proj_weight[ -config.hidden_size :, : ] __lowercase = in_proj_bias[-config.hidden_size :] def lowercase_ ( _UpperCamelCase ): '''simple docstring''' __lowercase = ['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(_UpperCamelCase , _UpperCamelCase ) def lowercase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' __lowercase = dct.pop(_UpperCamelCase ) __lowercase = val @torch.no_grad() def lowercase_ ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' __lowercase = ViltConfig(image_size=3_84 , patch_size=32 , tie_word_embeddings=_UpperCamelCase ) __lowercase = False __lowercase = False __lowercase = False __lowercase = False if "vqa" in checkpoint_url: __lowercase = True __lowercase = 31_29 __lowercase = '''huggingface/label-files''' __lowercase = '''vqa2-id2label.json''' __lowercase = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type='''dataset''' ) , '''r''' ) ) __lowercase = {int(_UpperCamelCase ): v for k, v in idalabel.items()} __lowercase = idalabel __lowercase = {v: k for k, v in idalabel.items()} __lowercase = ViltForQuestionAnswering(_UpperCamelCase ) elif "nlvr" in checkpoint_url: __lowercase = True __lowercase = 2 __lowercase = {0: '''False''', 1: '''True'''} __lowercase = {v: k for k, v in config.idalabel.items()} __lowercase = 3 __lowercase = ViltForImagesAndTextClassification(_UpperCamelCase ) elif "irtr" in checkpoint_url: __lowercase = True __lowercase = ViltForImageAndTextRetrieval(_UpperCamelCase ) elif "mlm_itm" in checkpoint_url: __lowercase = True __lowercase = ViltForMaskedLM(_UpperCamelCase ) else: raise ValueError('''Unknown model type''' ) # load state_dict of original model, remove and rename some keys __lowercase = torch.hub.load_state_dict_from_url(_UpperCamelCase , map_location='''cpu''' )['''state_dict'''] __lowercase = create_rename_keys(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) for src, dest in rename_keys: rename_key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) read_in_q_k_v(_UpperCamelCase , _UpperCamelCase ) if mlm_model or irtr_model: 
__lowercase = ['''itm_score.fc.weight''', '''itm_score.fc.bias'''] for k in ignore_keys: state_dict.pop(_UpperCamelCase , _UpperCamelCase ) # load state dict into HuggingFace model model.eval() if mlm_model: __lowercase = model.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase ) assert missing_keys == ["mlm_score.decoder.bias"] else: model.load_state_dict(_UpperCamelCase ) # Define processor __lowercase = ViltImageProcessor(size=3_84 ) __lowercase = BertTokenizer.from_pretrained('''bert-base-uncased''' ) __lowercase = ViltProcessor(_UpperCamelCase , _UpperCamelCase ) # Forward pass on example inputs (image + text) if nlvr_model: __lowercase = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=_UpperCamelCase ).raw ) __lowercase = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=_UpperCamelCase ).raw ) __lowercase = ( '''The left image contains twice the number of dogs as the right image, and at least two dogs in total are''' ''' standing.''' ) __lowercase = processor(_UpperCamelCase , _UpperCamelCase , return_tensors='''pt''' ) __lowercase = processor(_UpperCamelCase , _UpperCamelCase , return_tensors='''pt''' ) __lowercase = model( input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , ) else: __lowercase = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''' , stream=_UpperCamelCase ).raw ) if mlm_model: __lowercase = '''a bunch of [MASK] laying on a [MASK].''' else: __lowercase = '''How many cats are there?''' __lowercase = processor(_UpperCamelCase , _UpperCamelCase , return_tensors='''pt''' ) __lowercase = model(**_UpperCamelCase ) # Verify outputs if mlm_model: __lowercase = torch.Size([1, 11, 3_05_22] ) __lowercase = torch.tensor([-12.50_61, -12.51_23, -12.51_74] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , _UpperCamelCase , atol=1E-4 ) # verify masked token prediction equals "cats" __lowercase = outputs.logits[0, 4, :].argmax(-1 ).item() assert tokenizer.decode([predicted_id] ) == "cats" elif vqa_model: __lowercase = torch.Size([1, 31_29] ) __lowercase = torch.tensor([-15.94_95, -18.14_72, -10.30_41] ) assert torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1E-4 ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , _UpperCamelCase , atol=1E-4 ) # verify vqa prediction equals "2" __lowercase = outputs.logits.argmax(-1 ).item() assert model.config.idalabel[predicted_idx] == "2" elif nlvr_model: __lowercase = torch.Size([1, 2] ) __lowercase = torch.tensor([-2.8721, 2.1291] ) assert torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1E-4 ) assert outputs.logits.shape == expected_shape Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase ) print(F'Saving model and processor to {pytorch_dump_folder_path}' ) model.save_pretrained(_UpperCamelCase ) processor.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": a : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''', type=str, help='''URL of the checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) a : Any = parser.parse_args() convert_vilt_checkpoint(args.checkpoint_url, 
args.pytorch_dump_folder_path)
639
'''simple docstring''' import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging UpperCamelCase_ = logging.get_logger(__name__) def _lowerCAmelCase ( __magic_name__ : List[str] ) -> Dict: lowercase : List[str] =R'''\w+[.]\d+''' lowercase : List[str] =re.findall(__magic_name__ , __magic_name__ ) for pat in pats: lowercase : Optional[int] =key.replace(__magic_name__ , '''_'''.join(pat.split('''.''' ) ) ) return key def _lowerCAmelCase ( __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : Dict ) -> str: lowercase : Dict =pt_tuple_key[:-1] + ('''scale''',) if ( any('''norm''' in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): lowercase : str =pt_tuple_key[:-1] + ('''scale''',) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: lowercase : str =pt_tuple_key[:-1] + ('''scale''',) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: lowercase : Dict =pt_tuple_key[:-1] + ('''embedding''',) return renamed_pt_tuple_key, pt_tensor # conv layer lowercase : Tuple =pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: lowercase : Tuple =pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer lowercase : str =pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight": lowercase : Optional[Any] =pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight lowercase : Dict =pt_tuple_key[:-1] + ('''weight''',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias lowercase : Union[str, Any] =pt_tuple_key[:-1] + ('''bias''',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any]=42 ) -> List[str]: # Step 1: Convert pytorch tensor to numpy lowercase : Optional[Any] ={k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params lowercase : str =flax_model.init_weights(PRNGKey(__magic_name__ ) ) lowercase : Dict =flatten_dict(__magic_name__ ) lowercase : Dict ={} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowercase : Dict =rename_key(__magic_name__ ) lowercase : Optional[int] =tuple(renamed_pt_key.split('''.''' ) ) # Correctly rename weight parameters lowercase , lowercase : Any =rename_key_and_reshape_tensor(__magic_name__ , __magic_name__ , __magic_name__ ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ''' f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # also add unexpected weight so that warning is thrown lowercase : Tuple =jnp.asarray(__magic_name__ ) return unflatten_dict(__magic_name__ )
92
0
__author__ = "Alexander Joslin"

import operator as op

# ``Stack`` comes from the package-local stack implementation.
from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesised arithmetic expression using two stacks."""
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1: push operands onto the operand stack
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: push operators onto the operator stack
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: on a closing parenthesis, pop one operator and two operands,
            # apply the operator, and push the result back
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)
    # RULE 5: the value left on the operand stack is the result
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
220
'''simple docstring''' from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. UpperCamelCase_ = 200 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. UpperCamelCase_ = 50 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. UpperCamelCase_ = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 1000)) def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : str ) -> tuple[str, float]: lowercase : int =len([g for position, g in enumerate(__magic_name__ ) if g == main_target[position]] ) return (item, float(__magic_name__ )) def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : str ) -> tuple[str, str]: lowercase : Any =random.randint(0 , len(__magic_name__ ) - 1 ) lowercase : Tuple =parent_a[:random_slice] + parent_a[random_slice:] lowercase : List[str] =parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : list[str] ) -> str: lowercase : Union[str, Any] =list(__magic_name__ ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: lowercase : Dict =random.choice(__magic_name__ ) return "".join(__magic_name__ ) def _lowerCAmelCase ( __magic_name__ : tuple[str, float] , __magic_name__ : list[tuple[str, float]] , __magic_name__ : list[str] , ) -> list[str]: lowercase : Any =[] # Generate more children proportionally to the fitness score. lowercase : Dict =int(parent_a[1] * 100 ) + 1 lowercase : List[str] =10 if child_n >= 10 else child_n for _ in range(__magic_name__ ): lowercase : List[str] =population_score[random.randint(0 , __magic_name__ )][0] lowercase , lowercase : Dict =crossover(parent_a[0] , __magic_name__ ) # Append new string to the population list. pop.append(mutate(__magic_name__ , __magic_name__ ) ) pop.append(mutate(__magic_name__ , __magic_name__ ) ) return pop def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : list[str] , __magic_name__ : bool = True ) -> tuple[int, int, str]: # Verify if N_POPULATION is bigger than N_SELECTED if N_POPULATION < N_SELECTED: lowercase : List[str] =f'''{N_POPULATION} must be bigger than {N_SELECTED}''' raise ValueError(__magic_name__ ) # Verify that the target contains no genes besides the ones inside genes variable. lowercase : Optional[int] =sorted({c for c in target if c not in genes} ) if not_in_genes_list: lowercase : Dict =f'''{not_in_genes_list} is not in genes list, evolution cannot converge''' raise ValueError(__magic_name__ ) # Generate random starting population. lowercase : int =[] for _ in range(__magic_name__ ): population.append(''''''.join([random.choice(__magic_name__ ) for i in range(len(__magic_name__ ) )] ) ) # Just some logs to know what the algorithms is doing. lowercase , lowercase : Optional[int] =0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(__magic_name__ ) # Random population created. Now it's time to evaluate. 
# Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. lowercase : List[str] =[evaluate(__magic_name__ , __magic_name__ ) for item in population] # Check if there is a matching evolution. lowercase : int =sorted(__magic_name__ , key=lambda __magic_name__ : x[1] , reverse=__magic_name__ ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( f'''\nGeneration: {generation}''' f'''\nTotal Population:{total_population}''' f'''\nBest score: {population_score[0][1]}''' f'''\nBest string: {population_score[0][0]}''' ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. lowercase : Any =population[: int(N_POPULATION / 3 )] population.clear() population.extend(__magic_name__ ) # Normalize population score to be between 0 and 1. lowercase : Dict =[ (item, score / len(__magic_name__ )) for item, score in population_score ] # This is selection for i in range(__magic_name__ ): population.extend(select(population_score[int(__magic_name__ )] , __magic_name__ , __magic_name__ ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. if len(__magic_name__ ) > N_POPULATION: break if __name__ == "__main__": UpperCamelCase_ = ( """This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!""" ) UpperCamelCase_ = list( """ ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm""" """nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\""" ) UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = basic(target_str, genes_list) print( f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}''' )
92
0
from __future__ import annotations

from dataclasses import dataclass


@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(tree: TreeNode | None) -> bool:
    """Return True if ``tree`` is a valid binary search tree."""

    # Validation
    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(tree):
        raise ValueError(
            "Each node should be type of TreeNode and data should be float."
        )

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(tree, -float("inf"), float("inf"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
181
'''simple docstring''' import datasets UpperCamelCase_ = """\ @InProceedings{conneau2018xnli, author = \"Conneau, Alexis and Rinott, Ruty and Lample, Guillaume and Williams, Adina and Bowman, Samuel R. and Schwenk, Holger and Stoyanov, Veselin\", title = \"XNLI: Evaluating Cross-lingual Sentence Representations\", booktitle = \"Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing\", year = \"2018\", publisher = \"Association for Computational Linguistics\", location = \"Brussels, Belgium\", } """ UpperCamelCase_ = """\ XNLI is a subset of a few thousand examples from MNLI which has been translated into a 14 different languages (some low-ish resource). As with MNLI, the goal is to predict textual entailment (does sentence A imply/contradict/neither sentence B) and is a classification task (given two sentences, predict one of three labels). """ UpperCamelCase_ = """ Computes XNLI score which is just simple accuracy. Args: predictions: Predicted labels. references: Ground truth labels. Returns: 'accuracy': accuracy Examples: >>> predictions = [0, 1] >>> references = [0, 1] >>> xnli_metric = datasets.load_metric(\"xnli\") >>> results = xnli_metric.compute(predictions=predictions, references=references) >>> print(results) {'accuracy': 1.0} """ def _lowerCAmelCase ( __magic_name__ : Dict , __magic_name__ : Union[str, Any] ) -> Union[str, Any]: return (preds == labels).mean() @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __SCREAMING_SNAKE_CASE ( datasets.Metric ): def lowerCamelCase_ ( self : str ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ), '''references''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ), } ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , ) def lowerCamelCase_ ( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] ): '''simple docstring''' return {"accuracy": simple_accuracy(UpperCAmelCase__ , UpperCAmelCase__ )}
92
0
"""simple docstring""" import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # and perform gradient accumulation # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __UpperCamelCase : Union[str, Any] = 16 __UpperCamelCase : List[Any] = 32 def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Accelerator , _UpperCAmelCase : int = 16 ): lowerCAmelCase = AutoTokenizer.from_pretrained('bert-base-cased' ) lowerCAmelCase = load_dataset('glue' , 'mrpc' ) def tokenize_function(_UpperCAmelCase : Dict ): # max_length=None => use the model max length (it's actually the default) lowerCAmelCase = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): lowerCAmelCase = datasets.map( _UpperCAmelCase , batched=_UpperCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCAmelCase = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(_UpperCAmelCase : Optional[int] ): # On TPU it's best to pad everything to the same length or training will be very slow. lowerCAmelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowerCAmelCase = 16 elif accelerator.mixed_precision != "no": lowerCAmelCase = 8 else: lowerCAmelCase = None return tokenizer.pad( _UpperCAmelCase , padding='longest' , max_length=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_tensors='pt' , ) # Instantiate dataloaders. 
lowerCAmelCase = DataLoader( tokenized_datasets['train'] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase ) lowerCAmelCase = DataLoader( tokenized_datasets['validation'] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1": from accelerate.test_utils.training import mocked_dataloaders __UpperCamelCase : Any = mocked_dataloaders # noqa: F811 def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int , _UpperCAmelCase : str ): # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS' , _UpperCAmelCase ) == "1": lowerCAmelCase = 2 # New Code # lowerCAmelCase = int(args.gradient_accumulation_steps ) # Initialize accelerator lowerCAmelCase = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=_UpperCAmelCase ) if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1: raise NotImplementedError( 'Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`' ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCAmelCase = config['''lr'''] lowerCAmelCase = int(config['num_epochs'] ) lowerCAmelCase = int(config['seed'] ) lowerCAmelCase = int(config['batch_size'] ) lowerCAmelCase = evaluate.load('glue' , 'mrpc' ) set_seed(_UpperCAmelCase ) lowerCAmelCase = get_dataloaders(_UpperCAmelCase , _UpperCAmelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=_UpperCAmelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowerCAmelCase = model.to(accelerator.device ) # Instantiate optimizer lowerCAmelCase = AdamW(params=model.parameters() , lr=_UpperCAmelCase ) # Instantiate scheduler lowerCAmelCase = get_linear_schedule_with_warmup( optimizer=_UpperCAmelCase , num_warmup_steps=100 , num_training_steps=(len(_UpperCAmelCase ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowerCAmelCase = accelerator.prepare( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # Now we train the model for epoch in range(_UpperCAmelCase ): model.train() for step, batch in enumerate(_UpperCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. with accelerator.accumulate(_UpperCAmelCase ): lowerCAmelCase = model(**_UpperCAmelCase ) lowerCAmelCase = output.loss accelerator.backward(_UpperCAmelCase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(_UpperCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): lowerCAmelCase = model(**_UpperCAmelCase ) lowerCAmelCase = outputs.logits.argmax(dim=-1 ) lowerCAmelCase = accelerator.gather_for_metrics((predictions, batch['labels']) ) metric.add_batch( predictions=_UpperCAmelCase , references=_UpperCAmelCase , ) lowerCAmelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'epoch {epoch}:' , _UpperCAmelCase ) def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = argparse.ArgumentParser(description='Simple example of training script.' ) parser.add_argument( '--mixed_precision' , type=_UpperCAmelCase , default=_UpperCAmelCase , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose' 'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.' 'and an Nvidia Ampere GPU.' , ) # New Code # parser.add_argument( '--gradient_accumulation_steps' , type=_UpperCAmelCase , default=1 , help='The number of minibatches to be ran before gradients are accumulated.' , ) parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' ) lowerCAmelCase = parser.parse_args() lowerCAmelCase = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(_UpperCAmelCase , _UpperCAmelCase ) if __name__ == "__main__": main()
4
'''simple docstring''' from __future__ import annotations import random import unittest from transformers import TransfoXLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLModel, ) class __SCREAMING_SNAKE_CASE : def __init__( self : List[str] , UpperCAmelCase__ : Dict , ): '''simple docstring''' lowercase : Any =parent lowercase : Optional[int] =13 lowercase : Union[str, Any] =7 lowercase : str =30 lowercase : Optional[int] =self.seq_length + self.mem_len lowercase : Dict =15 lowercase : List[str] =True lowercase : Optional[int] =True lowercase : Tuple =99 lowercase : str =[10, 50, 80] lowercase : List[Any] =32 lowercase : Optional[int] =32 lowercase : int =4 lowercase : Any =8 lowercase : List[Any] =128 lowercase : List[str] =2 lowercase : Tuple =2 lowercase : int =None lowercase : Optional[int] =1 lowercase : int =0 lowercase : List[str] =3 lowercase : str =self.vocab_size - 1 lowercase : Tuple =0.01 def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : str =None if self.use_labels: lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : Union[str, Any] =TransfoXLConfig( vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , ) return (config, input_ids_a, input_ids_a, lm_labels) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' random.seed(self.seed ) tf.random.set_seed(self.seed ) def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] ): '''simple docstring''' lowercase : Tuple =TFTransfoXLModel(UpperCAmelCase__ ) lowercase , lowercase : Optional[Any] =model(UpperCAmelCase__ ).to_tuple() lowercase : List[str] ={'''input_ids''': input_ids_a, '''mems''': mems_a} lowercase , lowercase : Any =model(UpperCAmelCase__ ).to_tuple() self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def lowerCamelCase_ ( self : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int ): '''simple docstring''' lowercase : int =TFTransfoXLLMHeadModel(UpperCAmelCase__ ) lowercase , lowercase : Tuple =model(UpperCAmelCase__ 
).to_tuple() lowercase : Optional[Any] ={'''input_ids''': input_ids_a, '''labels''': lm_labels} lowercase , lowercase : Optional[int] =model(UpperCAmelCase__ ).to_tuple() lowercase , lowercase : List[str] =model([input_ids_a, mems_a] ).to_tuple() lowercase : int ={'''input_ids''': input_ids_a, '''mems''': mems_a, '''labels''': lm_labels} lowercase , lowercase : str =model(UpperCAmelCase__ ).to_tuple() self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def lowerCamelCase_ ( self : str , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] ): '''simple docstring''' lowercase : Optional[int] =TFTransfoXLForSequenceClassification(UpperCAmelCase__ ) lowercase : Union[str, Any] =model(UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' lowercase : List[Any] =self.prepare_config_and_inputs() ((lowercase) , (lowercase) , (lowercase) , (lowercase)) : Optional[Any] =config_and_inputs lowercase : Union[str, Any] ={'''input_ids''': input_ids_a} return config, inputs_dict @require_tf class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ): lowerCamelCase_ = ( (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () ) lowerCamelCase_ = () if is_tf_available() else () lowerCamelCase_ = ( { 'feature-extraction': TFTransfoXLModel, 'text-classification': TFTransfoXLForSequenceClassification, 'text-generation': TFTransfoXLLMHeadModel, 'zero-shot': TFTransfoXLForSequenceClassification, } if is_tf_available() else {} ) # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int ): '''simple docstring''' if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. 
return True return False def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : Any =TFTransfoXLModelTester(self ) lowercase : Optional[int] =ConfigTester(self , config_class=UpperCAmelCase__ , d_embed=37 ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' self.model_tester.set_seed() lowercase : List[str] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_model(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' self.model_tester.set_seed() lowercase : Any =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' lowercase , lowercase : Tuple =self.model_tester.prepare_config_and_inputs_for_common() lowercase : int =[TFTransfoXLForSequenceClassification] for model_class in self.all_model_classes: lowercase : str =model_class(UpperCAmelCase__ ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class in list_other_models_with_output_ebd: lowercase : Union[str, Any] =model.get_output_embeddings() assert isinstance(UpperCAmelCase__ , tf.keras.layers.Layer ) lowercase : Any =model.get_bias() assert name is None else: lowercase : Optional[int] =model.get_output_embeddings() assert x is None lowercase : Optional[int] =model.get_bias() assert name is None def lowerCamelCase_ ( self : Any ): '''simple docstring''' # TODO JP: Make TransfoXL XLA compliant pass @slow def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase : int =TFTransfoXLModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) @unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' ) def lowerCamelCase_ ( self : int ): '''simple docstring''' pass @require_tf class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @unittest.skip('''Skip test until #12651 is resolved.''' ) @slow def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowercase : Optional[Any] =TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' ) # fmt: off lowercase : Tuple =tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . 
# Rasputin has a vision and denounces one of the men as a horse thief . Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . <eod> </s> <eos> # fmt: off lowercase : Optional[int] =[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( # except for Alexei and Maria ) are discovered. The voice of young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. # 1883 Western Siberia, a young Grigori Rasputin is asked by his father # and a group of men to perform magic. Rasputin has a vision and # denounces one of the men as a horse thief. Although his father initially # slaps him for making such an accusation, Rasputin watches as the man # is chased outside and beaten. Twenty years later, Rasputin sees a vision # of the Virgin Mary, prompting him to become a priest. # Rasputin quickly becomes famous, with people, even a bishop, begging for # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar # Nicholas II and his family were discovered. The voice of <unk> young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos> lowercase : int =model.generate(UpperCAmelCase__ , max_length=200 , do_sample=UpperCAmelCase__ ) self.assertListEqual(output_ids[0].numpy().tolist() , UpperCAmelCase__ )
92
0
import argparse import torch from safetensors.torch import load_file from diffusers import StableDiffusionPipeline def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Any: # load base model UpperCamelCase = StableDiffusionPipeline.from_pretrained(_lowercase , torch_dtype=torch.floataa ) # load LoRA weight from .safetensors UpperCamelCase = load_file(_lowercase ) UpperCamelCase = [] # directly update weight in diffusers model for key in state_dict: # it is suggested to print out the key, it usually will be something like below # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight" # as we have set the alpha beforehand, so just skip if ".alpha" in key or key in visited: continue if "text" in key: UpperCamelCase = key.split('.' )[0].split(LORA_PREFIX_TEXT_ENCODER + '_' )[-1].split('_' ) UpperCamelCase = pipeline.text_encoder else: UpperCamelCase = key.split('.' )[0].split(LORA_PREFIX_UNET + '_' )[-1].split('_' ) UpperCamelCase = pipeline.unet # find the target layer UpperCamelCase = layer_infos.pop(0 ) while len(_lowercase ) > -1: try: UpperCamelCase = curr_layer.__getattr__(_lowercase ) if len(_lowercase ) > 0: UpperCamelCase = layer_infos.pop(0 ) elif len(_lowercase ) == 0: break except Exception: if len(_lowercase ) > 0: temp_name += "_" + layer_infos.pop(0 ) else: UpperCamelCase = layer_infos.pop(0 ) UpperCamelCase = [] if "lora_down" in key: pair_keys.append(key.replace('lora_down' , 'lora_up' ) ) pair_keys.append(_lowercase ) else: pair_keys.append(_lowercase ) pair_keys.append(key.replace('lora_up' , 'lora_down' ) ) # update weight if len(state_dict[pair_keys[0]].shape ) == 4: UpperCamelCase = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) UpperCamelCase = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(_lowercase , _lowercase ).unsqueeze(2 ).unsqueeze(3 ) else: UpperCamelCase = state_dict[pair_keys[0]].to(torch.floataa ) UpperCamelCase = state_dict[pair_keys[1]].to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(_lowercase , _lowercase ) # update visited list for item in pair_keys: visited.append(_lowercase ) return pipeline if __name__ == "__main__": _snake_case = argparse.ArgumentParser() parser.add_argument( '''--base_model_path''', default=None, type=str, required=True, help='''Path to the base model in diffusers format.''' ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.''' ) parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') parser.add_argument( '''--lora_prefix_unet''', default='''lora_unet''', type=str, help='''The prefix of UNet weight in safetensors''' ) parser.add_argument( '''--lora_prefix_text_encoder''', default='''lora_te''', type=str, help='''The prefix of text encoder weight in safetensors''', ) parser.add_argument('''--alpha''', default=0.75, type=float, help='''The merging ratio in W = W0 + alpha * deltaW''') parser.add_argument( '''--to_safetensors''', action='''store_true''', help='''Whether to store pipeline in safetensors format or not.''' ) parser.add_argument('''--device''', type=str, help='''Device to use (e.g. 
cpu, cuda:0, cuda:1, etc.)''') _snake_case = parser.parse_args() _snake_case = args.base_model_path _snake_case = args.checkpoint_path _snake_case = args.dump_path _snake_case = args.lora_prefix_unet _snake_case = args.lora_prefix_text_encoder _snake_case = args.alpha _snake_case = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha) _snake_case = pipe.to(args.device) pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
282
'''simple docstring''' import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class __SCREAMING_SNAKE_CASE : def __init__( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str=13 , UpperCAmelCase__ : Tuple=7 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Optional[Any]=99 , UpperCAmelCase__ : Any=16 , UpperCAmelCase__ : Optional[Any]=36 , UpperCAmelCase__ : str=6 , UpperCAmelCase__ : Any=6 , UpperCAmelCase__ : Optional[int]=6 , UpperCAmelCase__ : Any=37 , UpperCAmelCase__ : List[str]="gelu" , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : List[str]=512 , UpperCAmelCase__ : List[str]=16 , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : Union[str, Any]=3 , UpperCAmelCase__ : List[Any]=4 , UpperCAmelCase__ : Tuple=None , ): '''simple docstring''' lowercase : str =parent lowercase : int =batch_size lowercase : Any =seq_length lowercase : int =is_training lowercase : str =use_input_mask lowercase : int =use_token_type_ids lowercase : Dict =use_labels lowercase : int =vocab_size lowercase : str =embedding_size lowercase : Union[str, Any] =hidden_size lowercase : Tuple =num_hidden_layers lowercase : Any =num_hidden_groups lowercase : Union[str, Any] =num_attention_heads lowercase : Any =intermediate_size lowercase : Tuple =hidden_act lowercase : Optional[int] =hidden_dropout_prob lowercase : Union[str, Any] =attention_probs_dropout_prob lowercase : List[Any] =max_position_embeddings lowercase : int =type_vocab_size lowercase : int =type_sequence_label_size lowercase : Any =initializer_range lowercase : List[Any] =num_labels lowercase : int =num_choices lowercase : Optional[int] =scope def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : int =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : Optional[int] =None if self.use_input_mask: lowercase : List[str] =random_attention_mask([self.batch_size, self.seq_length] ) lowercase : Dict =None if self.use_token_type_ids: lowercase : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase : Tuple =None lowercase : Any =None lowercase : Dict =None if self.use_labels: lowercase : int =ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase : str =ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase : Optional[Any] =ids_tensor([self.batch_size] , self.num_choices ) lowercase : Any =self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' return AlbertConfig( vocab_size=self.vocab_size , 
hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str ): '''simple docstring''' lowercase : int =AlbertModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : int =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) lowercase : Dict =model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) lowercase : int =model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple ): '''simple docstring''' lowercase : Tuple =AlbertForPreTraining(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : int =model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , sentence_order_label=UpperCAmelCase__ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int ): '''simple docstring''' lowercase : Tuple =AlbertForMaskedLM(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : str =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] ): '''simple docstring''' lowercase : List[str] =AlbertForQuestionAnswering(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : List[str] =model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : int , 
UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] ): '''simple docstring''' lowercase : Optional[Any] =self.num_labels lowercase : Any =AlbertForSequenceClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : Dict =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict ): '''simple docstring''' lowercase : List[Any] =self.num_labels lowercase : str =AlbertForTokenClassification(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : int =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] ): '''simple docstring''' lowercase : Optional[int] =self.num_choices lowercase : List[Any] =AlbertForMultipleChoice(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : List[Any] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : int =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : int =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : List[str] =model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : Union[str, Any] =self.prepare_config_and_inputs() ( ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ) : Dict =config_and_inputs lowercase : Optional[Any] ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ): lowerCamelCase_ = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) lowerCamelCase_ = ( { 'feature-extraction': AlbertModel, 'fill-mask': AlbertForMaskedLM, 'question-answering': AlbertForQuestionAnswering, 'text-classification': AlbertForSequenceClassification, 'token-classification': AlbertForTokenClassification, 'zero-shot': AlbertForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase_ = True def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int=False ): '''simple docstring''' lowercase : Optional[int] =super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ ) if return_labels: if model_class in get_values(UpperCAmelCase__ ): lowercase : Any =torch.zeros( 
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCAmelCase__ ) lowercase : Any =torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ ) return inputs_dict def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : Tuple =AlbertModelTester(self ) lowercase : Optional[Any] =ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37 ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowercase : Optional[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' lowercase : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' lowercase : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : str ): '''simple docstring''' lowercase : List[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : str =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' lowercase : int =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : Optional[int] =self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowercase : Tuple =type self.model_tester.create_and_check_model(*UpperCAmelCase__ ) @slow def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase : str =AlbertModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) @require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @slow def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowercase : int =AlbertModel.from_pretrained('''albert-base-v2''' ) lowercase : Optional[int] =torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) lowercase : Any =torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): lowercase : Any =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )[0] lowercase : int =torch.Size((1, 11, 768) ) self.assertEqual(output.shape , UpperCAmelCase__ ) lowercase : Union[str, Any] =torch.tensor( [[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCAmelCase__ , atol=1E-4 ) )
92
0
"""simple docstring""" from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case = "ClapFeatureExtractor" snake_case = ("RobertaTokenizer", "RobertaTokenizerFast") def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple ): super().__init__(UpperCAmelCase__ , UpperCAmelCase__ ) def __call__( self : Tuple , SCREAMING_SNAKE_CASE_ : str=None , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , SCREAMING_SNAKE_CASE_ : str=None , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ): lowerCamelCase__ = kwargs.pop("""sampling_rate""" , UpperCAmelCase__ ) if text is None and audios is None: raise ValueError("""You have to specify either text or audios. Both cannot be none.""" ) if text is not None: lowerCamelCase__ = self.tokenizer(UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ ) if audios is not None: lowerCamelCase__ = self.feature_extractor( UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ ) if text is not None and audios is not None: lowerCamelCase__ = audio_features.input_features return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**UpperCAmelCase__ ) , tensor_type=UpperCAmelCase__ ) def __UpperCAmelCase ( self : Tuple , *SCREAMING_SNAKE_CASE_ : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : Tuple ): return self.tokenizer.batch_decode(*UpperCAmelCase__ , **UpperCAmelCase__ ) def __UpperCAmelCase ( self : List[Any] , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ): return self.tokenizer.decode(*UpperCAmelCase__ , **UpperCAmelCase__ ) @property def __UpperCAmelCase ( self : List[str] ): lowerCamelCase__ = self.tokenizer.model_input_names lowerCamelCase__ = self.feature_extractor.model_input_names return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
129
'''simple docstring''' import numpy as np from cva import destroyAllWindows, imread, imshow, waitKey class __SCREAMING_SNAKE_CASE : def __init__( self : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int ): '''simple docstring''' if dst_width < 0 or dst_height < 0: raise ValueError('''Destination width/height should be > 0''' ) lowercase : Union[str, Any] =img lowercase : Union[str, Any] =img.shape[1] lowercase : str =img.shape[0] lowercase : Union[str, Any] =dst_width lowercase : str =dst_height lowercase : str =self.src_w / self.dst_w lowercase : Optional[Any] =self.src_h / self.dst_h lowercase : int =( np.ones((self.dst_h, self.dst_w, 3) , np.uinta ) * 255 ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' for i in range(self.dst_h ): for j in range(self.dst_w ): lowercase : List[Any] =self.img[self.get_y(UpperCAmelCase__ )][self.get_x(UpperCAmelCase__ )] def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : int ): '''simple docstring''' return int(self.ratio_x * x ) def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : int ): '''simple docstring''' return int(self.ratio_y * y ) if __name__ == "__main__": UpperCamelCase_ , UpperCamelCase_ = 800, 600 UpperCamelCase_ = imread("""image_data/lena.jpg""", 1) UpperCamelCase_ = NearestNeighbour(im, dst_w, dst_h) n.process() imshow( f'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output ) waitKey(0) destroyAllWindows()
92
0
import os import sys import warnings from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen from ..table import array_cast from ..utils.file_utils import is_local_path from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: import PIL.Image from .features import FeatureType _A = None _A = "<" if sys.byteorder == "little" else ">" # Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image _A = [ np.dtype("|b1"), np.dtype("|u1"), np.dtype("<u2"), np.dtype(">u2"), np.dtype("<i2"), np.dtype(">i2"), np.dtype("<u4"), np.dtype(">u4"), np.dtype("<i4"), np.dtype(">i4"), np.dtype("<f4"), np.dtype(">f4"), np.dtype("<f8"), np.dtype(">f8"), ] @dataclass class _lowerCAmelCase : _lowercase =True _lowercase =None # Automatically constructed _lowercase ='''PIL.Image.Image''' _lowercase =pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} ) _lowercase =field(default='''Image''' , init=lowercase__ , repr=lowercase__ ) def __call__( self ) -> Optional[Any]: return self.pa_type def __a ( self , _UpperCamelCase ) -> Tuple: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support encoding images, please install \'Pillow\'." ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): lowerCAmelCase_ = np.array(UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): return {"path": value, "bytes": None} elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): return {"path": None, "bytes": value} elif isinstance(UpperCAmelCase__ , np.ndarray ): # convert the image array to PNG/TIFF bytes return encode_np_array(UpperCAmelCase__ ) elif isinstance(UpperCAmelCase__ , PIL.Image.Image ): # convert the PIL image to bytes (default format is PNG/TIFF) return encode_pil_image(UpperCAmelCase__ ) elif value.get("path" ) is not None and os.path.isfile(value["path"] ): # we set "bytes": None to not duplicate the data if they're already available locally return {"bytes": None, "path": value.get("path" )} elif value.get("bytes" ) is not None or value.get("path" ) is not None: # store the image bytes, and path is used to infer the image format using the file extension return {"bytes": value.get("bytes" ), "path": value.get("path" )} else: raise ValueError( f"""An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.""" ) def __a ( self , _UpperCamelCase , _UpperCamelCase=None ) -> List[Any]: if not self.decode: raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead." ) if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support decoding images, please install \'Pillow\'." 
) if token_per_repo_id is None: lowerCAmelCase_ = {} lowerCAmelCase_ = value['''path'''], value['''bytes'''] if bytes_ is None: if path is None: raise ValueError(f"""An image should have one of \'path\' or \'bytes\' but both are None in {value}.""" ) else: if is_local_path(UpperCAmelCase__ ): lowerCAmelCase_ = PIL.Image.open(UpperCAmelCase__ ) else: lowerCAmelCase_ = path.split("::" )[-1] try: lowerCAmelCase_ = string_to_dict(UpperCAmelCase__ , config.HUB_DATASETS_URL )['''repo_id'''] lowerCAmelCase_ = token_per_repo_id.get(UpperCAmelCase__ ) except ValueError: lowerCAmelCase_ = None with xopen(UpperCAmelCase__ , "rb" , use_auth_token=UpperCAmelCase__ ) as f: lowerCAmelCase_ = BytesIO(f.read() ) lowerCAmelCase_ = PIL.Image.open(bytes_ ) else: lowerCAmelCase_ = PIL.Image.open(BytesIO(bytes_ ) ) image.load() # to avoid "Too many open files" errors return image def __a ( self ) -> Union[str, Any]: from .features import Value return ( self if self.decode else { "bytes": Value("binary" ), "path": Value("string" ), } ) def __a ( self , _UpperCamelCase ) -> List[Any]: if pa.types.is_string(storage.type ): lowerCAmelCase_ = pa.array([None] * len(UpperCAmelCase__ ) , type=pa.binary() ) lowerCAmelCase_ = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): lowerCAmelCase_ = pa.array([None] * len(UpperCAmelCase__ ) , type=pa.string() ) lowerCAmelCase_ = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index("bytes" ) >= 0: lowerCAmelCase_ = storage.field("bytes" ) else: lowerCAmelCase_ = pa.array([None] * len(UpperCAmelCase__ ) , type=pa.binary() ) if storage.type.get_field_index("path" ) >= 0: lowerCAmelCase_ = storage.field("path" ) else: lowerCAmelCase_ = pa.array([None] * len(UpperCAmelCase__ ) , type=pa.string() ) lowerCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() ) elif pa.types.is_list(storage.type ): lowerCAmelCase_ = pa.array( [encode_np_array(np.array(UpperCAmelCase__ ) )["bytes"] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , ) lowerCAmelCase_ = pa.array([None] * len(UpperCAmelCase__ ) , type=pa.string() ) lowerCAmelCase_ = pa.StructArray.from_arrays( [bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() ) return array_cast(UpperCAmelCase__ , self.pa_type ) def __a ( self , _UpperCamelCase ) -> Optional[Any]: @no_op_if_value_is_null def path_to_bytes(_UpperCamelCase ): with xopen(UpperCAmelCase__ , "rb" ) as f: lowerCAmelCase_ = f.read() return bytes_ lowerCAmelCase_ = pa.array( [ (path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) lowerCAmelCase_ = pa.array( [os.path.basename(UpperCAmelCase__ ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , ) lowerCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() ) return array_cast(UpperCAmelCase__ , self.pa_type ) def lowerCamelCase__ ( ): """simple docstring""" if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support encoding images, please install \'Pillow\'." 
) global _IMAGE_COMPRESSION_FORMATS if _IMAGE_COMPRESSION_FORMATS is None: PIL.Image.init() lowerCAmelCase_ = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) ) return _IMAGE_COMPRESSION_FORMATS def lowerCamelCase__ ( __lowerCAmelCase : "PIL.Image.Image" ): """simple docstring""" lowerCAmelCase_ = BytesIO() if image.format in list_image_compression_formats(): lowerCAmelCase_ = image.format else: lowerCAmelCase_ = '''PNG''' if image.mode in ['''1''', '''L''', '''LA''', '''RGB''', '''RGBA'''] else '''TIFF''' image.save(__lowerCAmelCase , format=__lowerCAmelCase ) return buffer.getvalue() def lowerCamelCase__ ( __lowerCAmelCase : "PIL.Image.Image" ): """simple docstring""" if hasattr(__lowerCAmelCase , "filename" ) and image.filename != "": return {"path": image.filename, "bytes": None} else: return {"path": None, "bytes": image_to_bytes(__lowerCAmelCase )} def lowerCamelCase__ ( __lowerCAmelCase : np.ndarray ): """simple docstring""" if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support encoding images, please install \'Pillow\'." ) lowerCAmelCase_ = array.dtype lowerCAmelCase_ = dtype.byteorder if dtype.byteorder != '''=''' else _NATIVE_BYTEORDER lowerCAmelCase_ = dtype.kind lowerCAmelCase_ = dtype.itemsize lowerCAmelCase_ = None # Multi-channel array case (only np.dtype("|u1") is allowed) if array.shape[2:]: lowerCAmelCase_ = np.dtype("|u1" ) if dtype_kind not in ["u", "i"]: raise TypeError( F"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.""" ) if dtype is not dest_dtype: warnings.warn(F"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'""" ) # Exact match elif dtype in _VALID_IMAGE_ARRAY_DTPYES: lowerCAmelCase_ = dtype else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually) while dtype_itemsize >= 1: lowerCAmelCase_ = dtype_byteorder + dtype_kind + str(__lowerCAmelCase ) lowerCAmelCase_ = np.dtype(__lowerCAmelCase ) if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES: warnings.warn(F"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'""" ) break else: dtype_itemsize //= 2 if dest_dtype is None: raise TypeError( F"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}""" ) lowerCAmelCase_ = PIL.Image.fromarray(array.astype(__lowerCAmelCase ) ) return {"path": None, "bytes": image_to_bytes(__lowerCAmelCase )} def lowerCamelCase__ ( __lowerCAmelCase : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ): """simple docstring""" if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support encoding images, please install \'Pillow\'." ) if objs: lowerCAmelCase_ = first_non_null_value(__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ): return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs] if isinstance(__lowerCAmelCase , np.ndarray ): lowerCAmelCase_ = no_op_if_value_is_null(__lowerCAmelCase ) return [obj_to_image_dict_func(__lowerCAmelCase ) for obj in objs] elif isinstance(__lowerCAmelCase , PIL.Image.Image ): lowerCAmelCase_ = no_op_if_value_is_null(__lowerCAmelCase ) return [obj_to_image_dict_func(__lowerCAmelCase ) for obj in objs] else: return objs else: return objs
290
'''simple docstring''' from __future__ import annotations def _lowerCAmelCase ( __magic_name__ : list[float] ) -> float: lowercase : Any =0.0_0 lowercase : Tuple =0 for resistor in resistors: if resistor <= 0: lowercase : Dict =f'''Resistor at index {index} has a negative or zero value!''' raise ValueError(__magic_name__ ) first_sum += 1 / float(__magic_name__ ) index += 1 return 1 / first_sum def _lowerCAmelCase ( __magic_name__ : list[float] ) -> float: lowercase : Optional[Any] =0.0_0 lowercase : int =0 for resistor in resistors: sum_r += resistor if resistor < 0: lowercase : Tuple =f'''Resistor at index {index} has a negative value!''' raise ValueError(__magic_name__ ) index += 1 return sum_r if __name__ == "__main__": import doctest doctest.testmod()
92
0
'''simple docstring''' import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation A__ : Tuple = logging.get_logger(__name__) A__ : Optional[int] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} A__ : List[str] = { '''tokenizer_file''': { '''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''', }, } A__ : List[Any] = { '''gpt-neox-20b''': 2_0_4_8, } class snake_case__ ( lowercase__ ): A__ = VOCAB_FILES_NAMES A__ = PRETRAINED_VOCAB_FILES_MAP A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ = ['''input_ids''', '''attention_mask'''] def __init__( self : Union[str, Any] , __a : List[str]=None , __a : Optional[int]=None , __a : str=None , __a : Optional[Any]="<|endoftext|>" , __a : str="<|endoftext|>" , __a : Optional[Any]="<|endoftext|>" , __a : int=False , **__a : int , ) -> Tuple: '''simple docstring''' super().__init__( UpperCAmelCase__ , UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , **UpperCAmelCase__ , ) __snake_case : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , UpperCAmelCase__ ) != add_prefix_space: __snake_case : Any = getattr(UpperCAmelCase__ , pre_tok_state.pop('type' ) ) __snake_case : List[str] = add_prefix_space __snake_case : List[str] = pre_tok_class(**UpperCAmelCase__ ) __snake_case : Optional[int] = add_prefix_space def A_ ( self : int , __a : str , __a : Optional[str] = None ) -> Tuple: '''simple docstring''' __snake_case : int = self._tokenizer.model.save(UpperCAmelCase__ , name=UpperCAmelCase__ ) return tuple(UpperCAmelCase__ ) def A_ ( self : Union[str, Any] , __a : "Conversation" ) -> Tuple: '''simple docstring''' __snake_case : Dict = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) + [self.eos_token_id] ) if len(UpperCAmelCase__ ) > self.model_max_length: __snake_case : Optional[Any] = input_ids[-self.model_max_length :] return input_ids
286
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""", """self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""", """self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""", """self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""", """self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""", """self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""", """self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""", """self_attn.rotary_emb""": """encoder.embed_positions""", """self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""", """conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""", """conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""", """conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""", """conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""", """conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""", """ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""", """ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""", """ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""", """ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""", """ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""", """ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } UpperCamelCase_ = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", ] def _lowerCAmelCase ( __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple ) -> str: for attribute in key.split('''.''' ): lowercase : Tuple =getattr(__magic_name__ , __magic_name__ ) if weight_type is not None: lowercase : Optional[int] =getattr(__magic_name__ , __magic_name__ ).shape else: lowercase : List[Any] =hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'''Shape of hf {key + "." 
+ weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": lowercase : Any =value elif weight_type == "weight_g": lowercase : List[Any] =value elif weight_type == "weight_v": lowercase : Union[str, Any] =value elif weight_type == "bias": lowercase : Tuple =value elif weight_type == "running_mean": lowercase : Union[str, Any] =value elif weight_type == "running_var": lowercase : str =value elif weight_type == "num_batches_tracked": lowercase : Tuple =value elif weight_type == "inv_freq": lowercase : Optional[Any] =value else: lowercase : Tuple =value logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def _lowerCAmelCase ( __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : str ) -> Union[str, Any]: lowercase : Optional[int] =[] lowercase : Tuple =fairseq_model.state_dict() lowercase : List[Any] =hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): lowercase : Tuple =False if "conv_layers" in name: load_conv_layer( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , hf_model.config.feat_extract_norm == '''group''' , ) lowercase : List[Any] =True else: for key, mapped_key in MAPPING.items(): lowercase : Optional[int] ='''wav2vec2_conformer.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: lowercase : Union[str, Any] =True if "*" in mapped_key: lowercase : Optional[int] =name.split(__magic_name__ )[0].split('''.''' )[-2] lowercase : List[str] =mapped_key.replace('''*''' , __magic_name__ ) if "pos_bias_u" in name: lowercase : Optional[Any] =None elif "pos_bias_v" in name: lowercase : Union[str, Any] =None elif "weight_g" in name: lowercase : Any ='''weight_g''' elif "weight_v" in name: lowercase : Tuple ='''weight_v''' elif "bias" in name: lowercase : Optional[int] ='''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj lowercase : Optional[int] ='''weight''' elif "running_mean" in name: lowercase : Union[str, Any] ='''running_mean''' elif "inv_freq" in name: lowercase : Any ='''inv_freq''' elif "running_var" in name: lowercase : Tuple ='''running_var''' elif "num_batches_tracked" in name: lowercase : Dict ='''num_batches_tracked''' else: lowercase : str =None set_recursively(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) continue if not is_used: unused_weights.append(__magic_name__ ) logger.warning(f'''Unused weights: {unused_weights}''' ) def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Tuple ) -> int: lowercase : Optional[Any] =full_name.split('''conv_layers.''' )[-1] lowercase : Any =name.split('''.''' ) lowercase : List[str] =int(items[0] ) lowercase : Union[str, Any] =int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) lowercase : Union[str, Any] =value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( 
f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) lowercase : Optional[Any] =value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) lowercase : Optional[int] =value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) lowercase : str =value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(__magic_name__ ) @torch.no_grad() def _lowerCAmelCase ( __magic_name__ : List[str] , __magic_name__ : Optional[Any] , __magic_name__ : Tuple=None , __magic_name__ : Optional[Any]=None , __magic_name__ : Union[str, Any]=True ) -> Union[str, Any]: if config_path is not None: lowercase : Optional[Any] =WavaVecaConformerConfig.from_pretrained(__magic_name__ , hidden_act='''swish''' ) else: lowercase : Optional[int] =WavaVecaConformerConfig() if "rope" in checkpoint_path: lowercase : Dict ='''rotary''' if is_finetuned: if dict_path: lowercase : Optional[Any] =Dictionary.load(__magic_name__ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq lowercase : str =target_dict.pad_index lowercase : Union[str, Any] =target_dict.bos_index lowercase : Any =target_dict.eos_index lowercase : Tuple =len(target_dict.symbols ) lowercase : str =os.path.join(__magic_name__ , '''vocab.json''' ) if not os.path.isdir(__magic_name__ ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__magic_name__ ) ) return os.makedirs(__magic_name__ , exist_ok=__magic_name__ ) lowercase : Dict =target_dict.indices # fairseq has the <pad> and <s> switched lowercase : str =0 lowercase : List[Any] =1 with open(__magic_name__ , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(__magic_name__ , __magic_name__ ) lowercase : List[str] =WavaVecaCTCTokenizer( __magic_name__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__magic_name__ , ) lowercase : Optional[int] =True if config.feat_extract_norm == '''layer''' else False lowercase : str =WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__magic_name__ , return_attention_mask=__magic_name__ , ) lowercase : Tuple =WavaVecaProcessor(feature_extractor=__magic_name__ , tokenizer=__magic_name__ ) processor.save_pretrained(__magic_name__ ) lowercase : str =WavaVecaConformerForCTC(__magic_name__ ) else: lowercase : Tuple =WavaVecaConformerForPreTraining(__magic_name__ ) if is_finetuned: lowercase , lowercase , lowercase : Optional[int] =fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} 
) else: lowercase : Dict =argparse.Namespace(task='''audio_pretraining''' ) lowercase : Optional[int] =fairseq.tasks.setup_task(__magic_name__ ) lowercase , lowercase , lowercase : str =fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__magic_name__ ) lowercase : List[Any] =model[0].eval() recursively_load_weights(__magic_name__ , __magic_name__ , not is_finetuned ) hf_wavavec.save_pretrained(__magic_name__ ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) UpperCamelCase_ = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
92
0
import doctest import sys import warnings from os.path import abspath, dirname, join import _pytest from transformers.testing_utils import HfDoctestModule, HfDocTestParser # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. snake_case : Tuple = abspath(join(dirname(__file__), 'src')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='ignore', category=FutureWarning) def SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ): """simple docstring""" config.addinivalue_line( 'markers' ,'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested' ) config.addinivalue_line( 'markers' ,'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested' ) config.addinivalue_line('markers' ,'is_pipeline_test: mark test to run only when pipelines are tested' ) config.addinivalue_line('markers' ,'is_staging_test: mark test to run only in the staging environment' ) config.addinivalue_line('markers' ,'accelerate_tests: mark test that require accelerate' ) config.addinivalue_line('markers' ,'tool_tests: mark the tool tests that are run on their specific schedule' ) def SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ): """simple docstring""" from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(UpperCAmelCase__ ) def SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ): """simple docstring""" from transformers.testing_utils import pytest_terminal_summary_main _SCREAMING_SNAKE_CASE = terminalreporter.config.getoption('--make-reports' ) if make_reports: pytest_terminal_summary_main(UpperCAmelCase__ ,id=UpperCAmelCase__ ) def SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ,UpperCAmelCase__ ): """simple docstring""" if exitstatus == 5: _SCREAMING_SNAKE_CASE = 0 # Doctest custom flag to ignore output. snake_case : List[str] = doctest.register_optionflag('IGNORE_RESULT') snake_case : Any = doctest.OutputChecker class __lowercase ( lowercase__ ): """simple docstring""" def __magic_name__ ( self , A_ , A_ , A_ )-> int: if IGNORE_RESULT & optionflags: return True return OutputChecker.check_output(self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) snake_case : Union[str, Any] = CustomOutputChecker snake_case : List[Any] = HfDoctestModule snake_case : Optional[int] = HfDocTestParser
605
'''simple docstring''' import math import flax.linen as nn import jax.numpy as jnp def _lowerCAmelCase ( __magic_name__ : jnp.ndarray , __magic_name__ : int , __magic_name__ : float = 1 , __magic_name__ : float = 1 , __magic_name__ : float = 1.0E4 , __magic_name__ : bool = False , __magic_name__ : float = 1.0 , ) -> jnp.ndarray: assert timesteps.ndim == 1, "Timesteps should be a 1d-array" assert embedding_dim % 2 == 0, f'''Embedding dimension {embedding_dim} should be even''' lowercase : int =float(embedding_dim // 2 ) lowercase : Optional[int] =math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift) lowercase : Any =min_timescale * jnp.exp(jnp.arange(__magic_name__ , dtype=jnp.floataa ) * -log_timescale_increment ) lowercase : List[Any] =jnp.expand_dims(__magic_name__ , 1 ) * jnp.expand_dims(__magic_name__ , 0 ) # scale embeddings lowercase : Tuple =scale * emb if flip_sin_to_cos: lowercase : Dict =jnp.concatenate([jnp.cos(__magic_name__ ), jnp.sin(__magic_name__ )] , axis=1 ) else: lowercase : Any =jnp.concatenate([jnp.sin(__magic_name__ ), jnp.cos(__magic_name__ )] , axis=1 ) lowercase : List[str] =jnp.reshape(__magic_name__ , [jnp.shape(__magic_name__ )[0], embedding_dim] ) return signal class __SCREAMING_SNAKE_CASE ( nn.Module ): lowerCamelCase_ = 32 lowerCamelCase_ = jnp.floataa @nn.compact def __call__( self : Tuple , UpperCAmelCase__ : int ): '''simple docstring''' lowercase : List[Any] =nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_1''' )(UpperCAmelCase__ ) lowercase : Any =nn.silu(UpperCAmelCase__ ) lowercase : int =nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_2''' )(UpperCAmelCase__ ) return temb class __SCREAMING_SNAKE_CASE ( nn.Module ): lowerCamelCase_ = 32 lowerCamelCase_ = False lowerCamelCase_ = 1 @nn.compact def __call__( self : int , UpperCAmelCase__ : str ): '''simple docstring''' return get_sinusoidal_embeddings( UpperCAmelCase__ , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
92
0
"""simple docstring""" import sacrebleu as scb from packaging import version from sacrebleu import TER import datasets __UpperCAmelCase ="""\ @inproceedings{snover-etal-2006-study, title = \"A Study of Translation Edit Rate with Targeted Human Annotation\", author = \"Snover, Matthew and Dorr, Bonnie and Schwartz, Rich and Micciulla, Linnea and Makhoul, John\", booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\", month = aug # \" 8-12\", year = \"2006\", address = \"Cambridge, Massachusetts, USA\", publisher = \"Association for Machine Translation in the Americas\", url = \"https://aclanthology.org/2006.amta-papers.25\", pages = \"223--231\", } @inproceedings{post-2018-call, title = \"A Call for Clarity in Reporting {BLEU} Scores\", author = \"Post, Matt\", booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\", month = oct, year = \"2018\", address = \"Belgium, Brussels\", publisher = \"Association for Computational Linguistics\", url = \"https://www.aclweb.org/anthology/W18-6319\", pages = \"186--191\", } """ __UpperCAmelCase ="""\ TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu (https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found here: https://github.com/jhclark/tercom. The implementation here is slightly different from sacrebleu in terms of the required input format. The length of the references and hypotheses lists need to be the same, so you may need to transpose your references compared to sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534 See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information. """ __UpperCAmelCase =""" Produces TER scores alongside the number of edits and reference length. Args: predictions (list of str): The system stream (a sequence of segments). references (list of list of str): A list of one or more reference streams (each a sequence of segments). normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`. ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`. support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters, as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana. Only applies if `normalized = True`. Defaults to `False`. case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`. Returns: 'score' (float): TER score (num_edits / sum_ref_lengths * 100) 'num_edits' (int): The cumulative number of edits 'ref_length' (float): The cumulative average reference length Examples: Example 1: >>> predictions = [\"does this sentence match??\", ... \"what about this sentence?\", ... \"What did the TER metric user say to the developer?\"] >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"], ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"], ... [\"Your jokes are...\", \"...TERrible\"]] >>> ter = datasets.load_metric(\"ter\") >>> results = ter.compute(predictions=predictions, ... references=references, ... 
case_sensitive=True) >>> print(results) {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0} Example 2: >>> predictions = [\"does this sentence match??\", ... \"what about this sentence?\"] >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"], ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]] >>> ter = datasets.load_metric(\"ter\") >>> results = ter.compute(predictions=predictions, ... references=references, ... case_sensitive=True) >>> print(results) {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0} Example 3: >>> predictions = [\"does this sentence match??\", ... \"what about this sentence?\"] >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"], ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]] >>> ter = datasets.load_metric(\"ter\") >>> results = ter.compute(predictions=predictions, ... references=references, ... normalized=True, ... case_sensitive=True) >>> print(results) {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5} Example 4: >>> predictions = [\"does this sentence match??\", ... \"what about this sentence?\"] >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"], ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]] >>> ter = datasets.load_metric(\"ter\") >>> results = ter.compute(predictions=predictions, ... references=references, ... ignore_punct=True, ... case_sensitive=False) >>> print(results) {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0} Example 5: >>> predictions = [\"does this sentence match??\", ... \"what about this sentence?\", ... \"What did the TER metric user say to the developer?\"] >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"], ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"], ... [\"Your jokes are...\", \"...TERrible\"]] >>> ter = datasets.load_metric(\"ter\") >>> results = ter.compute(predictions=predictions, ... references=references, ... ignore_punct=True, ... case_sensitive=False) >>> print(results) {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase__ ( datasets.Metric ): def lowercase_ ( self ): '''simple docstring''' if version.parse(scb.__version__ ) < version.parse("1.4.12" ): raise ImportWarning( "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n" "You can install it with `pip install \"sacrebleu>=1.4.12\"`." 
) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ), } ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[ "https://github.com/jhclark/tercom", ] , ) def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = False , ): '''simple docstring''' A__ = len(references[0] ) if any(len(UpperCAmelCase__ ) != references_per_prediction for refs in references ): raise ValueError("Sacrebleu requires the same number of references for each prediction" ) A__ = [[refs[i] for refs in references] for i in range(UpperCAmelCase__ )] A__ = TER( normalized=UpperCAmelCase__ , no_punct=UpperCAmelCase__ , asian_support=UpperCAmelCase__ , case_sensitive=UpperCAmelCase__ , ) A__ = sb_ter.corpus_score(UpperCAmelCase__ , UpperCAmelCase__ ) return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
337
'''simple docstring''' from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) # TODO Update this UpperCamelCase_ = { """facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""", # See all ESM models at https://huggingface.co/models?filter=esm } class __SCREAMING_SNAKE_CASE ( lowercase__ ): lowerCamelCase_ = 'esm' def __init__( self : Optional[Any] , UpperCAmelCase__ : str=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : int=768 , UpperCAmelCase__ : Union[str, Any]=12 , UpperCAmelCase__ : List[Any]=12 , UpperCAmelCase__ : Optional[Any]=3072 , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : int=1026 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : Optional[int]=1E-12 , UpperCAmelCase__ : List[Any]="absolute" , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : str=False , UpperCAmelCase__ : int=False , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : List[str]=None , **UpperCAmelCase__ : int , ): '''simple docstring''' super().__init__(pad_token_id=UpperCAmelCase__ , mask_token_id=UpperCAmelCase__ , **UpperCAmelCase__ ) lowercase : Any =vocab_size lowercase : List[Any] =hidden_size lowercase : Any =num_hidden_layers lowercase : Optional[Any] =num_attention_heads lowercase : Tuple =intermediate_size lowercase : int =hidden_dropout_prob lowercase : Dict =attention_probs_dropout_prob lowercase : Optional[int] =max_position_embeddings lowercase : Union[str, Any] =initializer_range lowercase : Tuple =layer_norm_eps lowercase : Union[str, Any] =position_embedding_type lowercase : List[Any] =use_cache lowercase : Dict =emb_layer_norm_before lowercase : Optional[Any] =token_dropout lowercase : Union[str, Any] =is_folding_model if is_folding_model: if esmfold_config is None: logger.info('''No esmfold_config supplied for folding model, using default values.''' ) lowercase : Any =EsmFoldConfig() elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): lowercase : Optional[int] =EsmFoldConfig(**UpperCAmelCase__ ) lowercase : Union[str, Any] =esmfold_config if vocab_list is None: logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' ) lowercase : int =get_default_vocab_list() else: lowercase : Tuple =vocab_list else: lowercase : Union[str, Any] =None lowercase : Dict =None if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , UpperCAmelCase__ ): raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : Union[str, Any] =super().to_dict() if isinstance(self.esmfold_config , UpperCAmelCase__ ): lowercase : Optional[Any] =self.esmfold_config.to_dict() return output @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = None lowerCamelCase_ = True lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = 0 lowerCamelCase_ = True lowerCamelCase_ = False lowerCamelCase_ = 1_28 lowerCamelCase_ = None def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' if self.trunk is None: lowercase : str =TrunkConfig() elif isinstance(self.trunk , UpperCAmelCase__ ): lowercase : int =TrunkConfig(**self.trunk ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : str 
=asdict(self ) lowercase : Union[str, Any] =self.trunk.to_dict() return output @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = 48 lowerCamelCase_ = 10_24 lowerCamelCase_ = 1_28 lowerCamelCase_ = 32 lowerCamelCase_ = 32 lowerCamelCase_ = 32 lowerCamelCase_ = 0 lowerCamelCase_ = 0 lowerCamelCase_ = False lowerCamelCase_ = 4 lowerCamelCase_ = 1_28 lowerCamelCase_ = None def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' if self.structure_module is None: lowercase : Any =StructureModuleConfig() elif isinstance(self.structure_module , UpperCAmelCase__ ): lowercase : Union[str, Any] =StructureModuleConfig(**self.structure_module ) if self.max_recycles <= 0: raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' ) if self.sequence_state_dim % self.sequence_state_dim != 0: raise ValueError( '''`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got''' F''' {self.sequence_state_dim} and {self.sequence_state_dim}.''' ) if self.pairwise_state_dim % self.pairwise_state_dim != 0: raise ValueError( '''`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got''' F''' {self.pairwise_state_dim} and {self.pairwise_state_dim}.''' ) lowercase : str =self.sequence_state_dim // self.sequence_head_width lowercase : int =self.pairwise_state_dim // self.pairwise_head_width if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width: raise ValueError( '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got''' F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' ) if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width: raise ValueError( '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got''' F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' ) if self.pairwise_state_dim % 2 != 0: raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' ) if self.dropout >= 0.4: raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : List[Any] =asdict(self ) lowercase : Any =self.structure_module.to_dict() return output @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = 3_84 lowerCamelCase_ = 1_28 lowerCamelCase_ = 16 lowerCamelCase_ = 1_28 lowerCamelCase_ = 12 lowerCamelCase_ = 4 lowerCamelCase_ = 8 lowerCamelCase_ = 0.1 lowerCamelCase_ = 8 lowerCamelCase_ = 1 lowerCamelCase_ = 2 lowerCamelCase_ = 7 lowerCamelCase_ = 10 lowerCamelCase_ = 1E-8 lowerCamelCase_ = 1E5 def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return asdict(self ) def _lowerCAmelCase ( ) -> Optional[int]: return ( "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>", )
92
0
"""simple docstring""" import pandas as pd from matplotlib import pyplot as plt from sklearn.linear_model import LinearRegression # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split # Fitting Polynomial Regression to the dataset from sklearn.preprocessing import PolynomialFeatures # Importing the dataset __A = pd.read_csv( "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/" "position_salaries.csv" ) __A = dataset.iloc[:, 1:2].values __A = dataset.iloc[:, 2].values __A , __A , __A , __A = train_test_split(X, y, test_size=0.2, random_state=0) __A = PolynomialFeatures(degree=4) __A = poly_reg.fit_transform(X) __A = LinearRegression() pol_reg.fit(X_poly, y) def a__ ( ) -> str: plt.scatter(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , color="red" ) plt.plot(__SCREAMING_SNAKE_CASE , pol_reg.predict(poly_reg.fit_transform(__SCREAMING_SNAKE_CASE ) ) , color="blue" ) plt.title("Truth or Bluff (Linear Regression)" ) plt.xlabel("Position level" ) plt.ylabel("Salary" ) plt.show() if __name__ == "__main__": viz_polymonial() # Predicting a new result with Polymonial Regression pol_reg.predict(poly_reg.fit_transform([[5.5]])) # output should be 132148.43750003
346
'''simple docstring''' # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import doctest import sys import warnings from os.path import abspath, dirname, join import _pytest from transformers.testing_utils import HfDoctestModule, HfDocTestParser # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. UpperCamelCase_ = abspath(join(dirname(__file__), """src""")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action="""ignore""", category=FutureWarning) def _lowerCAmelCase ( __magic_name__ : int ) -> Tuple: config.addinivalue_line( '''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' ) config.addinivalue_line( '''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' ) config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' ) config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' ) config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' ) config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' ) def _lowerCAmelCase ( __magic_name__ : int ) -> Any: from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(__magic_name__ ) def _lowerCAmelCase ( __magic_name__ : Any ) -> Any: from transformers.testing_utils import pytest_terminal_summary_main lowercase : Optional[Any] =terminalreporter.config.getoption('''--make-reports''' ) if make_reports: pytest_terminal_summary_main(__magic_name__ , id=__magic_name__ ) def _lowerCAmelCase ( __magic_name__ : Tuple , __magic_name__ : Union[str, Any] ) -> List[str]: # If no tests are collected, pytest exists with code 5, which makes the CI fail. if exitstatus == 5: lowercase : Optional[int] =0 # Doctest custom flag to ignore output. UpperCamelCase_ = doctest.register_optionflag("""IGNORE_RESULT""") UpperCamelCase_ = doctest.OutputChecker class __SCREAMING_SNAKE_CASE ( lowercase__ ): def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] ): '''simple docstring''' if IGNORE_RESULT & optionflags: return True return OutputChecker.check_output(self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) UpperCamelCase_ = CustomOutputChecker UpperCamelCase_ = HfDoctestModule UpperCamelCase_ = HfDocTestParser
import json import logging import os import socket import git import numpy as np import torch logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, ) a : Any = logging.getLogger(__name__) def lowercase_ ( _UpperCamelCase ): '''simple docstring''' __lowercase = git.Repo(search_parent_directories=_UpperCamelCase ) __lowercase = { '''repo_id''': str(_UpperCamelCase ), '''repo_sha''': str(repo.head.object.hexsha ), '''repo_branch''': str(repo.active_branch ), } with open(os.path.join(_UpperCamelCase , '''git_log.json''' ) , '''w''' ) as f: json.dump(_UpperCamelCase , _UpperCamelCase , indent=4 ) def lowercase_ ( _UpperCamelCase ): '''simple docstring''' if params.n_gpu <= 0: __lowercase = 0 __lowercase = -1 __lowercase = True __lowercase = False return assert torch.cuda.is_available() logger.info('''Initializing GPUs''' ) if params.n_gpu > 1: assert params.local_rank != -1 __lowercase = int(os.environ['''WORLD_SIZE'''] ) __lowercase = int(os.environ['''N_GPU_NODE'''] ) __lowercase = int(os.environ['''RANK'''] ) # number of nodes / node ID __lowercase = params.world_size // params.n_gpu_per_node __lowercase = params.global_rank // params.n_gpu_per_node __lowercase = True assert params.n_nodes == int(os.environ['''N_NODES'''] ) assert params.node_id == int(os.environ['''NODE_RANK'''] ) # local job (single GPU) else: assert params.local_rank == -1 __lowercase = 1 __lowercase = 0 __lowercase = 0 __lowercase = 0 __lowercase = 1 __lowercase = 1 __lowercase = False # sanity checks assert params.n_nodes >= 1 assert 0 <= params.node_id < params.n_nodes assert 0 <= params.local_rank <= params.global_rank < params.world_size assert params.world_size == params.n_nodes * params.n_gpu_per_node # define whether this is the master process / if we are in multi-node distributed mode __lowercase = params.node_id == 0 and params.local_rank == 0 __lowercase = params.n_nodes > 1 # summary __lowercase = F'--- Global rank: {params.global_rank} - ' logger.info(PREFIX + '''Number of nodes: %i''' % params.n_nodes ) logger.info(PREFIX + '''Node ID : %i''' % params.node_id ) logger.info(PREFIX + '''Local rank : %i''' % params.local_rank ) logger.info(PREFIX + '''World size : %i''' % params.world_size ) logger.info(PREFIX + '''GPUs per node : %i''' % params.n_gpu_per_node ) logger.info(PREFIX + '''Master : %s''' % str(params.is_master ) ) logger.info(PREFIX + '''Multi-node : %s''' % str(params.multi_node ) ) logger.info(PREFIX + '''Multi-GPU : %s''' % str(params.multi_gpu ) ) logger.info(PREFIX + '''Hostname : %s''' % socket.gethostname() ) # set GPU device torch.cuda.set_device(params.local_rank ) # initialize multi-GPU if params.multi_gpu: logger.info('''Initializing PyTorch distributed''' ) torch.distributed.init_process_group( init_method='''env://''' , backend='''nccl''' , ) def lowercase_ ( _UpperCamelCase ): '''simple docstring''' np.random.seed(args.seed ) torch.manual_seed(args.seed ) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed )
'''simple docstring''' from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging UpperCamelCase_ = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE ( lowercase__ ): lowerCamelCase_ = ['pixel_values'] def __init__( self : List[Any] , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[int, float] = 1 / 255 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : int = 8 , **UpperCAmelCase__ : str , ): '''simple docstring''' super().__init__(**UpperCAmelCase__ ) lowercase : Union[str, Any] =do_rescale lowercase : List[Any] =rescale_factor lowercase : Tuple =do_pad lowercase : List[str] =pad_size def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : float , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Optional[int] ): '''simple docstring''' return rescale(UpperCAmelCase__ , scale=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ ) def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None ): '''simple docstring''' lowercase , lowercase : Union[str, Any] =get_image_size(UpperCAmelCase__ ) lowercase : Tuple =(old_height // size + 1) * size - old_height lowercase : Tuple =(old_width // size + 1) * size - old_width return pad(UpperCAmelCase__ , ((0, pad_height), (0, pad_width)) , mode='''symmetric''' , data_format=UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : ImageInput , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[float] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCAmelCase__ : Optional[Any] , ): '''simple docstring''' lowercase : int =do_rescale if do_rescale is not None else self.do_rescale lowercase : Tuple =rescale_factor if rescale_factor is not None else self.rescale_factor lowercase : int =do_pad if do_pad is not None else self.do_pad lowercase : List[Any] =pad_size if pad_size is not None else self.pad_size lowercase : Any =make_list_of_images(UpperCAmelCase__ ) if not valid_images(UpperCAmelCase__ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) # All transformations expect numpy arrays. lowercase : Dict =[to_numpy_array(UpperCAmelCase__ ) for image in images] if do_rescale: lowercase : Tuple =[self.rescale(image=UpperCAmelCase__ , scale=UpperCAmelCase__ ) for image in images] if do_pad: lowercase : Union[str, Any] =[self.pad(UpperCAmelCase__ , size=UpperCAmelCase__ ) for image in images] lowercase : Dict =[to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images] lowercase : Any ={'''pixel_values''': images} return BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ )
from __future__ import annotations from collections.abc import MutableSequence class lowerCAmelCase_ : '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase ): if len(UpperCAmelCase__ ) != degree + 1: raise ValueError( 'The number of coefficients should be equal to the degree + 1.' ) SCREAMING_SNAKE_CASE_ : list[float] =list(UpperCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Union[str, Any] =degree def __add__( self , __UpperCAmelCase ): if self.degree > polynomial_a.degree: SCREAMING_SNAKE_CASE_ : Optional[int] =self.coefficients[:] for i in range(polynomial_a.degree + 1 ): coefficients[i] += polynomial_a.coefficients[i] return Polynomial(self.degree , UpperCAmelCase__ ) else: SCREAMING_SNAKE_CASE_ : Dict =polynomial_a.coefficients[:] for i in range(self.degree + 1 ): coefficients[i] += self.coefficients[i] return Polynomial(polynomial_a.degree , UpperCAmelCase__ ) def __sub__( self , __UpperCAmelCase ): return self + polynomial_a * Polynomial(0 , [-1] ) def __neg__( self ): return Polynomial(self.degree , [-c for c in self.coefficients] ) def __mul__( self , __UpperCAmelCase ): SCREAMING_SNAKE_CASE_ : list[float] =[0] * (self.degree + polynomial_a.degree + 1) for i in range(self.degree + 1 ): for j in range(polynomial_a.degree + 1 ): coefficients[i + j] += ( self.coefficients[i] * polynomial_a.coefficients[j] ) return Polynomial(self.degree + polynomial_a.degree , UpperCAmelCase__ ) def __lowerCamelCase ( self , __UpperCAmelCase ): SCREAMING_SNAKE_CASE_ : int | float =0 for i in range(self.degree + 1 ): result += self.coefficients[i] * (substitution**i) return result def __str__( self ): SCREAMING_SNAKE_CASE_ : Union[str, Any] ='''''' for i in range(self.degree , -1 , -1 ): if self.coefficients[i] == 0: continue elif self.coefficients[i] > 0: if polynomial: polynomial += " + " else: polynomial += " - " if i == 0: polynomial += str(abs(self.coefficients[i] ) ) elif i == 1: polynomial += str(abs(self.coefficients[i] ) ) + "x" else: polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(UpperCAmelCase__ ) return polynomial def __repr__( self ): return self.__str__() def __lowerCamelCase ( self ): SCREAMING_SNAKE_CASE_ : list[float] =[0] * self.degree for i in range(self.degree ): SCREAMING_SNAKE_CASE_ : Tuple =self.coefficients[i + 1] * (i + 1) return Polynomial(self.degree - 1 , UpperCAmelCase__ ) def __lowerCamelCase ( self , __UpperCAmelCase = 0 ): SCREAMING_SNAKE_CASE_ : list[float] =[0] * (self.degree + 2) SCREAMING_SNAKE_CASE_ : str =constant for i in range(self.degree + 1 ): SCREAMING_SNAKE_CASE_ : Union[str, Any] =self.coefficients[i] / (i + 1) return Polynomial(self.degree + 1 , UpperCAmelCase__ ) def __eq__( self , __UpperCAmelCase ): if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): return False if self.degree != polynomial_a.degree: return False for i in range(self.degree + 1 ): if self.coefficients[i] != polynomial_a.coefficients[i]: return False return True def __ne__( self , __UpperCAmelCase ): return not self.__eq__(UpperCAmelCase__ )
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _SCREAMING_SNAKE_CASE = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = ["MBartTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = ["MBartTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ "MBART_PRETRAINED_MODEL_ARCHIVE_LIST", "MBartForCausalLM", "MBartForConditionalGeneration", "MBartForQuestionAnswering", "MBartForSequenceClassification", "MBartModel", "MBartPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ "TFMBartForConditionalGeneration", "TFMBartModel", "TFMBartPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ "FlaxMBartForConditionalGeneration", "FlaxMBartForQuestionAnswering", "FlaxMBartForSequenceClassification", "FlaxMBartModel", "FlaxMBartPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart import MBartTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart_fast import MBartTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mbart import ( MBART_PRETRAINED_MODEL_ARCHIVE_LIST, MBartForCausalLM, MBartForConditionalGeneration, MBartForQuestionAnswering, MBartForSequenceClassification, MBartModel, MBartPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mbart import ( FlaxMBartForConditionalGeneration, FlaxMBartForQuestionAnswering, FlaxMBartForSequenceClassification, FlaxMBartModel, FlaxMBartPreTrainedModel, ) else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}

if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring""" import logging import sys from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Union import librosa import torch from datasets import DatasetDict, load_dataset from packaging import version from torch import nn from transformers import ( HfArgumentParser, Trainer, TrainingArguments, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForPreTraining, is_apex_available, trainer_utils, ) from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices if is_apex_available(): from apex import amp if version.parse(version.parse(torch.__version__).base_version) >= version.parse('''1.6'''): __UpperCamelCase : Optional[Any] = True from torch.cuda.amp import autocast __UpperCamelCase : Any = logging.getLogger(__name__) @dataclass class a : snake_case__ = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) snake_case__ = field( default=lowercase__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) snake_case__ = field( default=lowercase__ , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} ) snake_case__ = field( default=lowercase__ , metadata={'''help''': '''Whether to log verbose messages or not.'''} , ) snake_case__ = field( default=2.0 , metadata={'''help''': '''Maximum temperature for gumbel softmax.'''} ) snake_case__ = field( default=0.5 , metadata={'''help''': '''Minimum temperature for gumbel softmax.'''} ) snake_case__ = field( default=0.99_99_95 , metadata={'''help''': '''Decay of gumbel temperature during training.'''} ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : ModelArguments , _UpperCAmelCase : TrainingArguments ): logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) lowerCAmelCase = logging.WARNING if model_args.verbose_logging: lowerCAmelCase = logging.DEBUG elif trainer_utils.is_main_process(training_args.local_rank ): lowerCAmelCase = logging.INFO logger.setLevel(_UpperCAmelCase ) @dataclass class a : snake_case__ = field( default=lowercase__ , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} ) snake_case__ = field( default=lowercase__ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) snake_case__ = field( default='''train''' , metadata={ '''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\'''' } , ) snake_case__ = field( default='''validation''' , metadata={ '''help''': ( '''The name of the validation data set split to use (via the datasets library). Defaults to \'validation\'''' ) } , ) snake_case__ = field( default='''file''' , metadata={'''help''': '''Column in the dataset that contains speech file path. 
Defaults to \'file\''''} , ) snake_case__ = field( default=lowercase__ , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} ) snake_case__ = field( default=1 , metadata={ '''help''': '''The percentage of the train set used as validation set in case there\'s no validation split''' } , ) snake_case__ = field( default=lowercase__ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , ) snake_case__ = field( default=2_0.0 , metadata={'''help''': '''Filter audio files that are longer than `max_duration_in_seconds` seconds'''} ) @dataclass class a : snake_case__ = 4_2 snake_case__ = 4_2 snake_case__ = '''longest''' snake_case__ = None snake_case__ = None def __call__( self , _snake_case ): """simple docstring""" lowerCAmelCase = self.feature_extractor.pad( UpperCAmelCase__ , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , ) lowerCAmelCase = self.model._get_feat_extract_output_lengths(batch['input_values'].shape[-1] ) lowerCAmelCase = batch['''input_values'''].shape[0] # make sure that no loss is computed on padded inputs if batch["attention_mask"] is not None: # compute real output lengths according to convolution formula lowerCAmelCase = self.model._get_feat_extract_output_lengths(batch['attention_mask'].sum(-1 ) ).to( torch.long ) lowerCAmelCase = torch.zeros( (batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch['input_values'].device ) # these two operations makes sure that all values # before the output lengths indices are attended to lowerCAmelCase = 1 lowerCAmelCase = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool() # sample randomly masked indices lowerCAmelCase = _compute_mask_indices( (batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=UpperCAmelCase__ , min_masks=2 , ) return batch class a ( lowercase__ ): def __init__( self , *_snake_case , _snake_case=1 , _snake_case=0 , _snake_case=1.0 , **_snake_case ): """simple docstring""" super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ ) lowerCAmelCase = 0 lowerCAmelCase = max_gumbel_temp lowerCAmelCase = min_gumbel_temp lowerCAmelCase = gumbel_temp_decay def UpperCamelCase__ ( self , _snake_case , _snake_case ): """simple docstring""" model.train() lowerCAmelCase = self._prepare_inputs(UpperCAmelCase__ ) if self.use_amp: with autocast(): lowerCAmelCase = self.compute_loss(UpperCAmelCase__ , UpperCAmelCase__ ) else: lowerCAmelCase = self.compute_loss(UpperCAmelCase__ , UpperCAmelCase__ ) if self.args.n_gpu > 1 or self.deepspeed: if model.module.config.ctc_loss_reduction == "mean": lowerCAmelCase = loss.mean() elif model.module.config.ctc_loss_reduction == "sum": lowerCAmelCase = loss.sum() / (inputs['''mask_time_indices''']).sum() else: raise ValueError(F'{model.config.ctc_loss_reduction} is not valid. 
Choose one of [\'mean\', \'sum\']' ) if self.args.gradient_accumulation_steps > 1: lowerCAmelCase = loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(UpperCAmelCase__ ).backward() elif self.use_apex: with amp.scale_loss(UpperCAmelCase__ , self.optimizer ) as scaled_loss: scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(UpperCAmelCase__ ) else: loss.backward() self.num_update_step += 1 # make sure gumbel softmax temperature is decayed if self.args.n_gpu > 1 or self.deepspeed: model.module.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) ) else: model.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) ) return loss.detach() def _SCREAMING_SNAKE_CASE (): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) lowerCAmelCase = parser.parse_args_into_dataclasses() configure_logger(_UpperCAmelCase , _UpperCAmelCase ) # Downloading and loading a dataset from the hub. lowerCAmelCase = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) if "validation" not in datasets.keys(): # make sure only "validation" and "train" keys remain" lowerCAmelCase = DatasetDict() lowerCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F'{data_args.train_split_name}[:{data_args.validation_split_percentage}%]' , cache_dir=model_args.cache_dir , ) lowerCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F'{data_args.train_split_name}[{data_args.validation_split_percentage}%:]' , cache_dir=model_args.cache_dir , ) else: # make sure only "validation" and "train" keys remain" lowerCAmelCase = DatasetDict() lowerCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split='validation' , cache_dir=model_args.cache_dir , ) lowerCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F'{data_args.train_split_name}' , cache_dir=model_args.cache_dir , ) # only normalized-inputs-training is supported lowerCAmelCase = WavaVecaFeatureExtractor.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=_UpperCAmelCase ) def prepare_dataset(_UpperCAmelCase : Optional[Any] ): # check that all files have the correct sampling rate lowerCAmelCase = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate ) return batch # load audio files into numpy arrays lowerCAmelCase = datasets.map( _UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets['train'].column_names ) # filter audio files that are too long lowerCAmelCase = vectorized_datasets.filter( lambda _UpperCAmelCase : len(data['speech'] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) ) def normalize(_UpperCAmelCase : Tuple ): return feature_extractor(batch['speech'] , sampling_rate=feature_extractor.sampling_rate ) # normalize and transform to `BatchFeatures` lowerCAmelCase = vectorized_datasets.map( _UpperCAmelCase , batched=_UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , 
remove_columns=vectorized_datasets['train'].column_names , ) # pretraining is only supported for "newer" stable layer norm architecture # apply_spec_augment has to be True, mask_feature_prob has to be 0.0 lowerCAmelCase = WavaVecaConfig.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , ) if not config.do_stable_layer_norm or config.feat_extract_norm != "layer": raise ValueError( 'PreTraining is only supported for ``config.do_stable_layer_norm=True`` and' ' ``config.feat_extract_norm=\'layer\'' ) lowerCAmelCase = WavaVecaForPreTraining(_UpperCAmelCase ) lowerCAmelCase = DataCollatorForWavaVecaPretraining(model=_UpperCAmelCase , feature_extractor=_UpperCAmelCase ) lowerCAmelCase = WavaVecaPreTrainer( model=_UpperCAmelCase , data_collator=_UpperCAmelCase , args=_UpperCAmelCase , train_dataset=vectorized_datasets['train'] , eval_dataset=vectorized_datasets['validation'] , tokenizer=_UpperCAmelCase , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , ) trainer.train() if __name__ == "__main__": main()
'''simple docstring''' import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process UpperCamelCase_ = logging.getLogger(__name__) def _lowerCAmelCase ( __magic_name__ : List[Any] , __magic_name__ : str ) -> Union[str, Any]: return (preds == labels).mean() @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) lowerCamelCase_ = field( default=lowercase__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) lowerCamelCase_ = field( default=lowercase__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) lowerCamelCase_ = field( default=lowercase__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys() )} ) lowerCamelCase_ = field(metadata={'help': 'Should contain the data files for the task.'} ) lowerCamelCase_ = field( default=1_28 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) lowerCamelCase_ = field( default=lowercase__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) def _lowerCAmelCase ( ) -> int: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. lowercase : Dict =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) lowercase , lowercase , lowercase : List[Any] =parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. 
Use''' ''' --overwrite_output_dir to overcome.''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('''Training/evaluation parameters %s''' , __magic_name__ ) # Set seed set_seed(training_args.seed ) try: lowercase : Any =processors[data_args.task_name]() lowercase : Optional[int] =processor.get_labels() lowercase : str =len(__magic_name__ ) except KeyError: raise ValueError('''Task not found: %s''' % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. lowercase : List[str] =AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__magic_name__ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) lowercase : int =AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) lowercase : Any =AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__magic_name__ , cache_dir=model_args.cache_dir , ) # Get datasets lowercase : int =( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=__magic_name__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) lowercase : Union[str, Any] =( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=__magic_name__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(__magic_name__ : EvalPrediction ) -> Dict: lowercase : Dict =np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(__magic_name__ , p.label_ids )} # Data collator lowercase : List[str] =DataCollatorWithPadding(__magic_name__ , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer lowercase : Dict =Trainer( model=__magic_name__ , args=__magic_name__ , train_dataset=__magic_name__ , eval_dataset=__magic_name__ , compute_metrics=__magic_name__ , data_collator=__magic_name__ , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation lowercase : Optional[Any] ={} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) 
lowercase : List[Any] =trainer.evaluate() lowercase : Any =os.path.join(training_args.output_dir , '''eval_results.txt''' ) if trainer.is_world_master(): with open(__magic_name__ , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key, value in result.items(): logger.info(''' %s = %s''' , __magic_name__ , __magic_name__ ) writer.write('''%s = %s\n''' % (key, value) ) results.update(__magic_name__ ) return results def _lowerCAmelCase ( __magic_name__ : Any ) -> Optional[Any]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class _lowerCAmelCase : """simple docstring""" def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple=13 , SCREAMING_SNAKE_CASE__ : Optional[int]=30 , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : List[Any]=3 , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Any=32 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=5 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=4 , SCREAMING_SNAKE_CASE__ : str=37 , SCREAMING_SNAKE_CASE__ : Union[str, Any]="gelu" , SCREAMING_SNAKE_CASE__ : int=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=0.1 , SCREAMING_SNAKE_CASE__ : int=10 , SCREAMING_SNAKE_CASE__ : Tuple=0.02 , SCREAMING_SNAKE_CASE__ : Tuple=3 , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : List[str]=2 , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = image_size UpperCamelCase = patch_size UpperCamelCase = num_channels UpperCamelCase = is_training UpperCamelCase = use_labels UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = type_sequence_label_size UpperCamelCase = initializer_range UpperCamelCase = scope UpperCamelCase = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) UpperCamelCase = (image_size // patch_size) ** 2 UpperCamelCase = num_patches + 2 def __lowerCAmelCase ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase = self.get_config() return config, pixel_values, labels def __lowerCAmelCase ( self : int ): """simple docstring""" return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , 
encoder_stride=self.encoder_stride , ) def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] ): """simple docstring""" UpperCamelCase = DeiTModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() UpperCamelCase = model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ): """simple docstring""" UpperCamelCase = DeiTForMaskedImageModeling(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() UpperCamelCase = model(UpperCAmelCase__ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images UpperCamelCase = 1 UpperCamelCase = DeiTForMaskedImageModeling(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCamelCase = model(UpperCAmelCase__ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict ): """simple docstring""" UpperCamelCase = self.type_sequence_label_size UpperCamelCase = DeiTForImageClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() UpperCamelCase = model(UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCamelCase = 1 UpperCamelCase = DeiTForImageClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCamelCase = model(UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __lowerCAmelCase ( self : Dict ): """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() ( UpperCamelCase ) = config_and_inputs UpperCamelCase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class _lowerCAmelCase ( lowercase__ , lowercase__ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] =( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE_ : int =( { "feature-extraction": DeiTModel, "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ : Optional[Any] =False SCREAMING_SNAKE_CASE_ : str =False SCREAMING_SNAKE_CASE_ : Optional[int] =False def __lowerCAmelCase ( self : Tuple ): """simple docstring""" UpperCamelCase = DeiTModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37 ) def __lowerCAmelCase ( self : Optional[Any] ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='DeiT does not use inputs_embeds' ) def __lowerCAmelCase ( self : int ): """simple docstring""" pass def __lowerCAmelCase ( self : Union[str, Any] ): """simple 
docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(UpperCAmelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCamelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCAmelCase__ , nn.Linear ) ) def __lowerCAmelCase ( self : Optional[Any] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(UpperCAmelCase__ ) UpperCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase = [*signature.parameters.keys()] UpperCamelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCAmelCase__ ) def __lowerCAmelCase ( self : str ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def __lowerCAmelCase ( self : int ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase__ ) def __lowerCAmelCase ( self : Any ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ ) def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any=False ): """simple docstring""" UpperCamelCase = super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def __lowerCAmelCase ( self : str ): """simple docstring""" if not self.model_tester.is_training: return UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(UpperCAmelCase__ ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue UpperCamelCase = model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.train() UpperCamelCase = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ ) UpperCamelCase = model(**UpperCAmelCase__ ).loss loss.backward() def __lowerCAmelCase ( self : List[str] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return UpperCamelCase = False UpperCamelCase = True for model_class in self.all_model_classes: if model_class in get_values(UpperCAmelCase__ ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue UpperCamelCase = model_class(UpperCAmelCase__ ) model.gradient_checkpointing_enable() model.to(UpperCAmelCase__ ) model.train() UpperCamelCase = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ ) UpperCamelCase = model(**UpperCAmelCase__ ).loss loss.backward() def __lowerCAmelCase ( self : Tuple ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = 
[ {'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float}, {'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long}, {'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(UpperCAmelCase__ ), *get_values(UpperCAmelCase__ ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F'Testing {model_class} with {problem_type["title"]}' ): UpperCamelCase = problem_type['''title'''] UpperCamelCase = problem_type['''num_labels'''] UpperCamelCase = model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.train() UpperCamelCase = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ ) if problem_type["num_labels"] > 1: UpperCamelCase = inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] ) UpperCamelCase = inputs['''labels'''].to(problem_type['dtype'] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=UpperCAmelCase__ ) as warning_list: UpperCamelCase = model(**UpperCAmelCase__ ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( F'Something is going wrong in the regression problem: intercepted {w.message}' ) loss.backward() @slow def __lowerCAmelCase ( self : int ): """simple docstring""" for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase = DeiTModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) def __lowerCamelCase ( ) -> List[str]: UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @cached_property def __lowerCAmelCase ( self : Union[str, Any] ): """simple docstring""" return ( DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' ) if is_vision_available() else None ) @slow def __lowerCAmelCase ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase = DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' ).to( UpperCAmelCase__ ) UpperCamelCase = self.default_image_processor UpperCamelCase = prepare_img() UpperCamelCase = image_processor(images=UpperCAmelCase__ , return_tensors='pt' ).to(UpperCAmelCase__ ) # forward pass with torch.no_grad(): UpperCamelCase = model(**UpperCAmelCase__ ) # verify the logits UpperCamelCase = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , UpperCAmelCase__ ) UpperCamelCase = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(UpperCAmelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def __lowerCAmelCase ( self : Optional[Any] ): """simple docstring""" UpperCamelCase = DeiTModel.from_pretrained( 'facebook/deit-base-distilled-patch16-224' , torch_dtype=torch.floataa , device_map='auto' ) UpperCamelCase = self.default_image_processor UpperCamelCase = 
prepare_img() UpperCamelCase = image_processor(images=UpperCAmelCase__ , return_tensors='pt' ) UpperCamelCase = inputs.pixel_values.to(UpperCAmelCase__ ) # forward pass to make sure inference works in fp16 with torch.no_grad(): UpperCamelCase = model(UpperCAmelCase__ )
'''simple docstring''' import logging import os from dataclasses import dataclass, field from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import List, Optional import faiss import torch from datasets import Features, Sequence, Value, load_dataset from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser UpperCamelCase_ = logging.getLogger(__name__) torch.set_grad_enabled(False) UpperCamelCase_ = """cuda""" if torch.cuda.is_available() else """cpu""" def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : Tuple=100 , __magic_name__ : Optional[int]=" " ) -> List[str]: lowercase : List[Any] =text.split(__magic_name__ ) return [character.join(text[i : i + n] ).strip() for i in range(0 , len(__magic_name__ ) , __magic_name__ )] def _lowerCAmelCase ( __magic_name__ : dict ) -> dict: lowercase , lowercase : int =[], [] for title, text in zip(documents['''title'''] , documents['''text'''] ): if text is not None: for passage in split_text(__magic_name__ ): titles.append(title if title is not None else '''''' ) texts.append(__magic_name__ ) return {"title": titles, "text": texts} def _lowerCAmelCase ( __magic_name__ : dict , __magic_name__ : DPRContextEncoder , __magic_name__ : DPRContextEncoderTokenizerFast ) -> dict: lowercase : Dict =ctx_tokenizer( documents['''title'''] , documents['''text'''] , truncation=__magic_name__ , padding='''longest''' , return_tensors='''pt''' )['''input_ids'''] lowercase : Optional[int] =ctx_encoder(input_ids.to(device=__magic_name__ ) , return_dict=__magic_name__ ).pooler_output return {"embeddings": embeddings.detach().cpu().numpy()} def _lowerCAmelCase ( __magic_name__ : "RagExampleArguments" , __magic_name__ : "ProcessingArguments" , __magic_name__ : "IndexHnswArguments" , ) -> str: ###################################### logger.info('''Step 1 - Create the dataset''' ) ###################################### # The dataset needed for RAG must have three columns: # - title (string): title of the document # - text (string): text of a passage of the document # - embeddings (array of dimension d): DPR representation of the passage # Let's say you have documents in tab-separated csv files with columns "title" and "text" assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file" # You can load a Dataset object this way lowercase : Tuple =load_dataset( '''csv''' , data_files=[rag_example_args.csv_path] , split='''train''' , delimiter='''\t''' , column_names=['''title''', '''text'''] ) # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files # Then split the documents into passages of 100 words lowercase : Optional[int] =dataset.map(__magic_name__ , batched=__magic_name__ , num_proc=processing_args.num_proc ) # And compute the embeddings lowercase : Any =DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=__magic_name__ ) lowercase : Any =DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ) lowercase : Optional[int] =Features( {'''text''': Value('''string''' ), '''title''': Value('''string''' ), '''embeddings''': Sequence(Value('''float32''' ) )} ) # optional, save as float32 instead of float64 to save space lowercase : Optional[Any] =dataset.map( partial(__magic_name__ , ctx_encoder=__magic_name__ , ctx_tokenizer=__magic_name__ ) , batched=__magic_name__ , 
batch_size=processing_args.batch_size , features=__magic_name__ , ) # And finally save your dataset lowercase : Optional[Any] =os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset''' ) dataset.save_to_disk(__magic_name__ ) # from datasets import load_from_disk # dataset = load_from_disk(passages_path) # to reload the dataset ###################################### logger.info('''Step 2 - Index the dataset''' ) ###################################### # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search lowercase : Union[str, Any] =faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT ) dataset.add_faiss_index('''embeddings''' , custom_index=__magic_name__ ) # And save the index lowercase : Dict =os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset_hnsw_index.faiss''' ) dataset.get_index('''embeddings''' ).save(__magic_name__ ) # dataset.load_faiss_index("embeddings", index_path) # to reload the index @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = field( default=str(Path(lowercase__ ).parent / 'test_run' / 'dummy-kb' / 'my_knowledge_dataset.csv' ) , metadata={'help': 'Path to a tab-separated csv file with columns \'title\' and \'text\''} , ) lowerCamelCase_ = field( default=lowercase__ , metadata={'help': 'Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'} , ) lowerCamelCase_ = field( default='facebook/rag-sequence-nq' , metadata={'help': 'The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''} , ) lowerCamelCase_ = field( default='facebook/dpr-ctx_encoder-multiset-base' , metadata={ 'help': ( 'The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or' ' \'facebook/dpr-ctx_encoder-multiset-base\'' ) } , ) lowerCamelCase_ = field( default=str(Path(lowercase__ ).parent / 'test_run' / 'dummy-kb' ) , metadata={'help': 'Path to a directory where the dataset passages and the index will be saved'} , ) @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = field( default=lowercase__ , metadata={ 'help': 'The number of processes to use to split the documents into passages. Default is single process.' } , ) lowerCamelCase_ = field( default=16 , metadata={ 'help': 'The batch size to use when computing the passages embeddings using the DPR context encoder.' } , ) @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = field( default=7_68 , metadata={'help': 'The dimension of the embeddings to pass to the HNSW Faiss index.'} , ) lowerCamelCase_ = field( default=1_28 , metadata={ 'help': ( 'The number of bi-directional links created for every new element during the HNSW index construction.' ) } , ) if __name__ == "__main__": logging.basicConfig(level=logging.WARNING) logger.setLevel(logging.INFO) UpperCamelCase_ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments)) UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = parser.parse_args_into_dataclasses() with TemporaryDirectory() as tmp_dir: UpperCamelCase_ = rag_example_args.output_dir or tmp_dir main(rag_example_args, processing_args, index_hnsw_args)
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) __magic_name__ = { """configuration_layoutlmv3""": [ """LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv3Config""", """LayoutLMv3OnnxConfig""", ], """processing_layoutlmv3""": ["""LayoutLMv3Processor"""], """tokenization_layoutlmv3""": ["""LayoutLMv3Tokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = ["""LayoutLMv3TokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = [ """LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""", """LayoutLMv3ForQuestionAnswering""", """LayoutLMv3ForSequenceClassification""", """LayoutLMv3ForTokenClassification""", """LayoutLMv3Model""", """LayoutLMv3PreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = [ """TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFLayoutLMv3ForQuestionAnswering""", """TFLayoutLMv3ForSequenceClassification""", """TFLayoutLMv3ForTokenClassification""", """TFLayoutLMv3Model""", """TFLayoutLMv3PreTrainedModel""", ] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = ["""LayoutLMv3FeatureExtractor"""] __magic_name__ = ["""LayoutLMv3ImageProcessor"""] if TYPE_CHECKING: from .configuration_layoutlmva import ( LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig, LayoutLMvaOnnxConfig, ) from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_layoutlmva import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, TFLayoutLMvaPreTrainedModel, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor from .image_processing_layoutlmva import LayoutLMvaImageProcessor else: import sys __magic_name__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
129
'''simple docstring''' import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import MaMaaaTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from transformers.utils import is_sentencepiece_available if is_sentencepiece_available(): from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin if is_sentencepiece_available(): UpperCamelCase_ = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right UpperCamelCase_ = 128022 UpperCamelCase_ = 128028 @require_sentencepiece class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ): lowerCamelCase_ = MaMaaaTokenizer lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = True def lowerCamelCase_ ( self : Dict ): '''simple docstring''' super().setUp() lowercase : Dict =['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>'''] lowercase : List[Any] =dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) ) lowercase : List[Any] =Path(self.tmpdirname ) save_json(UpperCAmelCase__ , save_dir / VOCAB_FILES_NAMES['''vocab_file'''] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(UpperCAmelCase__ , save_dir / VOCAB_FILES_NAMES['''spm_file'''] ) lowercase : Tuple =MaMaaaTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase_ ( self : Any , **UpperCAmelCase__ : int ): '''simple docstring''' return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Dict ): '''simple docstring''' return ( "This is a test", "This is a test", ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : Tuple ='''</s>''' lowercase : Union[str, Any] =0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' lowercase : List[Any] =self.get_tokenizer() lowercase : Optional[Any] =list(tokenizer.get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''</s>''' ) self.assertEqual(vocab_keys[1] , '''<unk>''' ) self.assertEqual(vocab_keys[-1] , '''<s>''' ) self.assertEqual(len(UpperCAmelCase__ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) ) @unittest.skip('''Skip this test while all models are still to be uploaded.''' ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' pass def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : Union[str, Any] =self.get_tokenizer() lowercase : str =tokenizer.tokenize('''This is a test''' ) self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [2, 3, 4, 5, 6] , ) lowercase : Optional[int] =tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] ) self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) lowercase : Tuple =tokenizer.convert_tokens_to_string(UpperCAmelCase__ ) self.assertEqual(UpperCAmelCase__ , '''This is a test''' ) @slow def lowerCamelCase_ ( self 
: List[str] ): '''simple docstring''' # fmt: off lowercase : int ={'''input_ids''': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase__ , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , ) @require_torch @require_sentencepiece @require_tokenizers class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): lowerCamelCase_ = 'facebook/m2m100_418M' lowerCamelCase_ = [ 'In my opinion, there are two levels of response from the French government.', 'NSA Affair Emphasizes Complete Lack of Debate on Intelligence', ] lowerCamelCase_ = [ 'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.', 'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement', ] # fmt: off lowerCamelCase_ = [EN_CODE, 5_93, 19_49, 11_57_81, 4, 7_15_86, 42_34, 6_06_33, 12_62_33, 4_32, 12_38_08, 1_55_92, 11_97, 11_71_32, 12_06_18, 5, 2] @classmethod def lowerCamelCase_ ( cls : Optional[Any] ): '''simple docstring''' lowercase : MaMaaaTokenizer =MaMaaaTokenizer.from_pretrained( cls.checkpoint_name , 
src_lang='''en''' , tgt_lang='''fr''' ) lowercase : Optional[int] =1 return cls def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' self.assertEqual(self.tokenizer.get_lang_id('''ar''' ) , 128006 ) self.assertEqual(self.tokenizer.get_lang_id('''en''' ) , 128022 ) self.assertEqual(self.tokenizer.get_lang_id('''ro''' ) , 128076 ) self.assertEqual(self.tokenizer.get_lang_id('''mr''' ) , 128063 ) def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : List[str] =self.tokenizer.get_vocab() self.assertEqual(len(UpperCAmelCase__ ) , self.tokenizer.vocab_size ) self.assertEqual(vocab['''<unk>'''] , 3 ) self.assertIn(self.tokenizer.get_lang_token('''en''' ) , UpperCAmelCase__ ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : List[Any] ='''en''' lowercase : Optional[Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , UpperCAmelCase__ ) def lowerCamelCase_ ( self : int ): '''simple docstring''' self.assertIn(UpperCAmelCase__ , self.tokenizer.all_special_ids ) # fmt: off lowercase : str =[FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2] # fmt: on lowercase : Optional[Any] =self.tokenizer.decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ ) lowercase : Optional[Any] =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCAmelCase__ ) self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertNotIn(self.tokenizer.eos_token , UpperCAmelCase__ ) def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : Any =tempfile.mkdtemp() lowercase : Tuple =self.tokenizer.lang_token_to_id self.tokenizer.save_pretrained(UpperCAmelCase__ ) lowercase : Union[str, Any] =MaMaaaTokenizer.from_pretrained(UpperCAmelCase__ ) self.assertDictEqual(new_tok.lang_token_to_id , UpperCAmelCase__ ) @require_torch def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' lowercase : List[str] ='''en''' lowercase : int ='''fr''' lowercase : Union[str, Any] =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase__ , return_tensors='''pt''' ) lowercase : str =shift_tokens_right( batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id ) for k in batch: lowercase : int =batch[k].tolist() # batch = {k: v.tolist() for k,v in batch.items()} # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 # batch.decoder_inputs_ids[0][0] == assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == FR_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2] == [2, FR_CODE] @require_torch def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : Optional[int] ='''mr''' self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) lowercase : Union[str, Any] ='''zh''' self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) @require_torch def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : int ='''mr''' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] ) self.assertListEqual(self.tokenizer.suffix_tokens , 
[self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) lowercase : Optional[Any] ='''zh''' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) @require_torch def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : Optional[Any] =self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''' ) self.assertEqual( nested_simplify(UpperCAmelCase__ ) , { # en_XX, A, test, EOS '''input_ids''': [[128022, 58, 4183, 2]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 128006, } , )
92
0
import inspect import os import unittest import torch import accelerate from accelerate import debug_launcher from accelerate.test_utils import ( execute_subprocess_async, require_cpu, require_huggingface_suite, require_multi_gpu, require_single_gpu, ) from accelerate.utils import patch_environment @require_huggingface_suite class _lowerCAmelCase ( unittest.TestCase ): def __a ( self ) -> Tuple: lowerCAmelCase_ = inspect.getfile(accelerate.test_utils ) lowerCAmelCase_ = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ["scripts", "external_deps", "test_metrics.py"] ) from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401 lowerCAmelCase_ = test_metrics @require_cpu def __a ( self ) -> List[Any]: debug_launcher(self.test_metrics.main , num_processes=1 ) @require_cpu def __a ( self ) -> List[str]: debug_launcher(self.test_metrics.main ) @require_single_gpu def __a ( self ) -> Dict: self.test_metrics.main() @require_multi_gpu def __a ( self ) -> str: print(f"""Found {torch.cuda.device_count()} devices.""" ) lowerCAmelCase_ = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(UpperCAmelCase__ , env=os.environ.copy() )
290
'''simple docstring''' def _lowerCAmelCase ( __magic_name__ : int = 600851475143 ) -> int: try: lowercase : Any =int(__magic_name__ ) except (TypeError, ValueError): raise TypeError('''Parameter n must be int or castable to int.''' ) if n <= 0: raise ValueError('''Parameter n must be greater than or equal to one.''' ) lowercase : Optional[Any] =2 lowercase : Dict =0 if n == 2: return 2 while n > 2: while n % i != 0: i += 1 lowercase : Union[str, Any] =i while n % i == 0: lowercase : Optional[int] =n // i i += 1 return int(__magic_name__ ) if __name__ == "__main__": print(f'''{solution() = }''')
92
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) A__ : str = { '''configuration_rembert''': ['''REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RemBertConfig''', '''RemBertOnnxConfig'''] } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Any = ['''RemBertTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Dict = ['''RemBertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Union[str, Any] = [ '''REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''RemBertForCausalLM''', '''RemBertForMaskedLM''', '''RemBertForMultipleChoice''', '''RemBertForQuestionAnswering''', '''RemBertForSequenceClassification''', '''RemBertForTokenClassification''', '''RemBertLayer''', '''RemBertModel''', '''RemBertPreTrainedModel''', '''load_tf_weights_in_rembert''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Any = [ '''TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFRemBertForCausalLM''', '''TFRemBertForMaskedLM''', '''TFRemBertForMultipleChoice''', '''TFRemBertForQuestionAnswering''', '''TFRemBertForSequenceClassification''', '''TFRemBertForTokenClassification''', '''TFRemBertLayer''', '''TFRemBertModel''', '''TFRemBertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_rembert import RemBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_rembert_fast import RemBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rembert import ( REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, RemBertForCausalLM, RemBertForMaskedLM, RemBertForMultipleChoice, RemBertForQuestionAnswering, RemBertForSequenceClassification, RemBertForTokenClassification, RemBertLayer, RemBertModel, RemBertPreTrainedModel, load_tf_weights_in_rembert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rembert import ( TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFRemBertForCausalLM, TFRemBertForMaskedLM, TFRemBertForMultipleChoice, TFRemBertForQuestionAnswering, TFRemBertForSequenceClassification, TFRemBertForTokenClassification, TFRemBertLayer, TFRemBertModel, TFRemBertPreTrainedModel, ) else: import sys A__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
286
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json""" ), # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2 } class __SCREAMING_SNAKE_CASE ( lowercase__ ): lowerCamelCase_ = 'speech_to_text_2' lowerCamelCase_ = ['past_key_values'] lowerCamelCase_ = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self : int , UpperCAmelCase__ : Dict=10000 , UpperCAmelCase__ : Optional[int]=6 , UpperCAmelCase__ : str=2048 , UpperCAmelCase__ : Optional[Any]=4 , UpperCAmelCase__ : Union[str, Any]=0.0 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Optional[Any]="relu" , UpperCAmelCase__ : List[str]=256 , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : List[Any]=0.0 , UpperCAmelCase__ : Optional[Any]=0.0 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : int=2 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : str=1 , UpperCAmelCase__ : Optional[Any]=0 , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : List[Any]=1024 , **UpperCAmelCase__ : Dict , ): '''simple docstring''' lowercase : List[str] =vocab_size lowercase : Optional[int] =d_model lowercase : Optional[Any] =decoder_ffn_dim lowercase : Any =decoder_layers lowercase : Dict =decoder_attention_heads lowercase : List[Any] =dropout lowercase : List[Any] =attention_dropout lowercase : Any =activation_dropout lowercase : Optional[Any] =activation_function lowercase : Optional[int] =init_std lowercase : Dict =decoder_layerdrop lowercase : Optional[int] =use_cache lowercase : Optional[Any] =decoder_layers lowercase : List[str] =scale_embedding # scale factor will be sqrt(d_model) if True lowercase : str =max_target_positions super().__init__( pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , decoder_start_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , )
92
0
def SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ): """simple docstring""" _SCREAMING_SNAKE_CASE = 0 # if input_string is "aba" than new_input_string become "a|b|a" _SCREAMING_SNAKE_CASE = '''''' _SCREAMING_SNAKE_CASE = '''''' # append each character + "|" in new_string for range(0, length-1) for i in input_string[: len(UpperCAmelCase__ ) - 1]: new_input_string += i + "|" # append last character new_input_string += input_string[-1] # we will store the starting and ending of previous furthest ending palindromic # substring _SCREAMING_SNAKE_CASE = 0, 0 # length[i] shows the length of palindromic substring with center i _SCREAMING_SNAKE_CASE = [1 for i in range(len(UpperCAmelCase__ ) )] # for each character in new_string find corresponding palindromic string _SCREAMING_SNAKE_CASE = 0 for j in range(len(UpperCAmelCase__ ) ): _SCREAMING_SNAKE_CASE = 1 if j > r else min(length[l + r - j] // 2 ,r - j + 1 ) while ( j - k >= 0 and j + k < len(UpperCAmelCase__ ) and new_input_string[k + j] == new_input_string[j - k] ): k += 1 _SCREAMING_SNAKE_CASE = 2 * k - 1 # does this string is ending after the previously explored end (that is r) ? # if yes the update the new r to the last index of this if j + k - 1 > r: _SCREAMING_SNAKE_CASE = j - k + 1 # noqa: E741 _SCREAMING_SNAKE_CASE = j + k - 1 # update max_length and start position if max_length < length[j]: _SCREAMING_SNAKE_CASE = length[j] _SCREAMING_SNAKE_CASE = j # create that string _SCREAMING_SNAKE_CASE = new_input_string[start - max_length // 2 : start + max_length // 2 + 1] for i in s: if i != "|": output_string += i return output_string if __name__ == "__main__": import doctest doctest.testmod()
605
'''simple docstring''' import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __SCREAMING_SNAKE_CASE : def __init__( self : str , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str]=13 , UpperCAmelCase__ : Any=32 , UpperCAmelCase__ : List[str]=3 , UpperCAmelCase__ : Any=4 , UpperCAmelCase__ : List[str]=[10, 20, 30, 40] , UpperCAmelCase__ : Any=[2, 2, 3, 2] , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Optional[Any]=37 , UpperCAmelCase__ : Union[str, Any]="gelu" , UpperCAmelCase__ : Optional[Any]=10 , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : Optional[int]=["stage2", "stage3", "stage4"] , UpperCAmelCase__ : Dict=[2, 3, 4] , UpperCAmelCase__ : Optional[int]=None , ): '''simple docstring''' lowercase : List[Any] =parent lowercase : Tuple =batch_size lowercase : List[str] =image_size lowercase : List[Any] =num_channels lowercase : Union[str, Any] =num_stages lowercase : int =hidden_sizes lowercase : Any =depths lowercase : Tuple =is_training lowercase : str =use_labels lowercase : List[Any] =intermediate_size lowercase : int =hidden_act lowercase : Union[str, Any] =num_labels lowercase : Optional[int] =initializer_range lowercase : int =out_features lowercase : List[str] =out_indices lowercase : str =scope def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : str =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase : Dict =None if self.use_labels: lowercase : List[Any] =ids_tensor([self.batch_size] , self.num_labels ) lowercase : Dict =self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self : Any ): '''simple docstring''' return ConvNextVaConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[Any] ): '''simple docstring''' lowercase : Dict =ConvNextVaModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : Optional[Any] =model(UpperCAmelCase__ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int ): 
'''simple docstring''' lowercase : Dict =ConvNextVaForImageClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : str =model(UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int ): '''simple docstring''' lowercase : Union[str, Any] =ConvNextVaBackbone(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : Optional[int] =model(UpperCAmelCase__ ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None lowercase : Optional[Any] =None lowercase : str =ConvNextVaBackbone(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : Optional[Any] =model(UpperCAmelCase__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : Any =self.prepare_config_and_inputs() lowercase , lowercase , lowercase : str =config_and_inputs lowercase : Any ={'''pixel_values''': pixel_values} return config, inputs_dict def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : str =self.prepare_config_and_inputs() lowercase , lowercase , lowercase : List[str] =config_and_inputs lowercase : Optional[Any] ={'''pixel_values''': pixel_values, '''labels''': labels} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ): lowerCamelCase_ = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) lowerCamelCase_ = ( {'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification} if is_torch_available() else {} ) lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' lowercase : Dict =ConvNextVaModelTester(self ) lowercase : str =ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37 ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase_ ( self : Any ): '''simple docstring''' return @unittest.skip(reason='''ConvNextV2 does not use inputs_embeds''' ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' pass 
@unittest.skip(reason='''ConvNextV2 does not support input and output embeddings''' ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' pass @unittest.skip(reason='''ConvNextV2 does not use feedforward chunking''' ) def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' pass def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' if not self.model_tester.is_training: return for model_class in self.all_model_classes: lowercase , lowercase : Union[str, Any] =self.model_tester.prepare_config_and_inputs_with_labels() lowercase : Optional[int] =True if model_class.__name__ in [ *get_values(UpperCAmelCase__ ), *get_values(UpperCAmelCase__ ), ]: continue lowercase : Dict =model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.train() lowercase : Optional[Any] =self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ ) lowercase : List[Any] =model(**UpperCAmelCase__ ).loss loss.backward() def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' if not self.model_tester.is_training: return for model_class in self.all_model_classes: lowercase , lowercase : Any =self.model_tester.prepare_config_and_inputs_with_labels() lowercase : List[Any] =False lowercase : Any =True if ( model_class.__name__ in [*get_values(UpperCAmelCase__ ), *get_values(UpperCAmelCase__ )] or not model_class.supports_gradient_checkpointing ): continue lowercase : Any =model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.gradient_checkpointing_enable() model.train() lowercase : Optional[Any] =self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ ) lowercase : int =model(**UpperCAmelCase__ ).loss loss.backward() def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowercase , lowercase : int =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase : Dict =model_class(UpperCAmelCase__ ) lowercase : Union[str, Any] =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase : int =[*signature.parameters.keys()] lowercase : Optional[Any] =['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCAmelCase__ ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' lowercase : Tuple =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : int ): '''simple docstring''' def check_hidden_states_output(UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple ): lowercase : int =model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() with torch.no_grad(): lowercase : Any =model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) ) lowercase : Dict =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowercase : List[Any] =self.model_tester.num_stages self.assertEqual(len(UpperCAmelCase__ ) , expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowercase , lowercase : List[str] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase : List[str] =True check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) # check that 
output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase : Tuple =True check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' lowercase : str =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ ) @slow def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase : List[Any] =ConvNextVaModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) def _lowerCAmelCase ( ) -> List[Any]: lowercase : Union[str, Any] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' return AutoImageProcessor.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ) if is_vision_available() else None @slow def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' lowercase : Tuple =ConvNextVaForImageClassification.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ).to(UpperCAmelCase__ ) lowercase : int =self.default_image_processor lowercase : List[str] =prepare_img() lowercase : List[Any] =preprocessor(images=UpperCAmelCase__ , return_tensors='''pt''' ).to(UpperCAmelCase__ ) # forward pass with torch.no_grad(): lowercase : Dict =model(**UpperCAmelCase__ ) # verify the logits lowercase : Optional[Any] =torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , UpperCAmelCase__ ) lowercase : Tuple =torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(UpperCAmelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1E-4 ) )
92
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase =logging.get_logger(__name__) __UpperCAmelCase ={ """google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/config.json""", """google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/config.json""" # See all FNet models at https://huggingface.co/models?filter=fnet } class lowerCAmelCase__ ( lowercase__ ): lowercase__ : Optional[Any] = """fnet""" def __init__( self , UpperCamelCase__=3_20_00 , UpperCamelCase__=7_68 , UpperCamelCase__=12 , UpperCamelCase__=30_72 , UpperCamelCase__="gelu_new" , UpperCamelCase__=0.1 , UpperCamelCase__=5_12 , UpperCamelCase__=4 , UpperCamelCase__=0.02 , UpperCamelCase__=1e-12 , UpperCamelCase__=False , UpperCamelCase__=5_12 , UpperCamelCase__=3 , UpperCamelCase__=1 , UpperCamelCase__=2 , **UpperCamelCase__ , ): '''simple docstring''' super().__init__(pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ ) A__ = vocab_size A__ = max_position_embeddings A__ = hidden_size A__ = num_hidden_layers A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = initializer_range A__ = type_vocab_size A__ = layer_norm_eps A__ = use_tpu_fourier_optimizations A__ = tpu_short_seq_length
337
'''simple docstring''' import re from flax.core.frozen_dict import freeze from flax.traverse_util import flatten_dict, unflatten_dict from jax.experimental import PartitionSpec as P # Sentinels UpperCamelCase_ = object() # For specifying empty leaf dict `{}` UpperCamelCase_ = object() def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : int ) -> Optional[int]: lowercase : Optional[Any] =tuple((re.compile(x + '''$''' ) for x in qs) ) for i in range(len(__magic_name__ ) - len(__magic_name__ ) + 1 ): lowercase : Union[str, Any] =[x.match(__magic_name__ ) for x, y in zip(__magic_name__ , ks[i:] )] if matches and all(__magic_name__ ): return True return False def _lowerCAmelCase ( __magic_name__ : Dict ) -> List[str]: def replace(__magic_name__ : Optional[int] , __magic_name__ : Optional[Any] ): for rule, replacement in rules: if _match(__magic_name__ , __magic_name__ ): return replacement return val return replace def _lowerCAmelCase ( ) -> int: return [ # embeddings (("transformer", "wpe", "embedding"), P('''mp''' , __magic_name__ )), (("transformer", "wte", "embedding"), P('''mp''' , __magic_name__ )), # atention (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__magic_name__ , '''mp''' )), (("attention", "out_proj", "kernel"), P('''mp''' , __magic_name__ )), (("attention", "out_proj", "bias"), None), # mlp (("mlp", "c_fc", "kernel"), P(__magic_name__ , '''mp''' )), (("mlp", "c_fc", "bias"), P('''mp''' )), (("mlp", "c_proj", "kernel"), P('''mp''' , __magic_name__ )), (("mlp", "c_proj", "bias"), None), # layer norms ((r"ln_\d+", "bias"), None), ((r"\d+", r"ln_\d+", "scale"), None), (("ln_f", "bias"), None), (("ln_f", "scale"), None), ] def _lowerCAmelCase ( __magic_name__ : str ) -> int: lowercase : int =_get_partition_rules() lowercase : Tuple =_replacement_rules(__magic_name__ ) lowercase : Any ={k: _unmatched for k in flatten_dict(__magic_name__ )} lowercase : Any ={k: replace(__magic_name__ , __magic_name__ ) for k, v in initd.items()} assert _unmatched not in result.values(), "Incomplete partition spec." return freeze(unflatten_dict(__magic_name__ ) )
92
0
"""simple docstring""" from __future__ import annotations def a__ ( __SCREAMING_SNAKE_CASE ) -> float: __lowerCAmelCase: Any = 0.00 __lowerCAmelCase: Tuple = 0 for resistor in resistors: if resistor <= 0: __lowerCAmelCase: Dict = F"Resistor at index {index} has a negative or zero value!" raise ValueError(__SCREAMING_SNAKE_CASE ) first_sum += 1 / float(__SCREAMING_SNAKE_CASE ) index += 1 return 1 / first_sum def a__ ( __SCREAMING_SNAKE_CASE ) -> float: __lowerCAmelCase: Optional[Any] = 0.00 __lowerCAmelCase: int = 0 for resistor in resistors: sum_r += resistor if resistor < 0: __lowerCAmelCase: Tuple = F"Resistor at index {index} has a negative value!" raise ValueError(__SCREAMING_SNAKE_CASE ) index += 1 return sum_r if __name__ == "__main__": import doctest doctest.testmod()
346
'''simple docstring''' from collections import defaultdict def _lowerCAmelCase ( __magic_name__ : int ) -> int: lowercase : Optional[Any] =1 lowercase : Union[str, Any] =True for v in tree[start]: if v not in visited: ret += dfs(__magic_name__ ) if ret % 2 == 0: cuts.append(__magic_name__ ) return ret def _lowerCAmelCase ( ) -> int: dfs(1 ) if __name__ == "__main__": UpperCamelCase_ , UpperCamelCase_ = 10, 9 UpperCamelCase_ = defaultdict(list) UpperCamelCase_ = {} UpperCamelCase_ = [] UpperCamelCase_ = 0 UpperCamelCase_ = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)] for u, v in edges: tree[u].append(v) tree[v].append(u) even_tree() print(len(cuts) - 1)
92
0
import math import sys def lowercase_ ( _UpperCamelCase ): '''simple docstring''' __lowercase = '''''' try: with open(_UpperCamelCase , '''rb''' ) as binary_file: __lowercase = binary_file.read() for dat in data: __lowercase = F'{dat:08b}' result += curr_byte return result except OSError: print('''File not accessible''' ) sys.exit() def lowercase_ ( _UpperCamelCase ): '''simple docstring''' __lowercase = {'''0''': '''0''', '''1''': '''1'''} __lowercase = '''''', '''''' __lowercase = len(_UpperCamelCase ) for i in range(len(_UpperCamelCase ) ): curr_string += data_bits[i] if curr_string not in lexicon: continue __lowercase = lexicon[curr_string] result += last_match_id __lowercase = last_match_id + '''0''' if math.loga(_UpperCamelCase ).is_integer(): __lowercase = {} for curr_key in list(_UpperCamelCase ): __lowercase = lexicon.pop(_UpperCamelCase ) __lowercase = new_lex __lowercase = last_match_id + '''1''' index += 1 __lowercase = '''''' return result def lowercase_ ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' __lowercase = 8 try: with open(_UpperCamelCase , '''wb''' ) as opened_file: __lowercase = [ to_write[i : i + byte_length] for i in range(0 , len(_UpperCamelCase ) , _UpperCamelCase ) ] if len(result_byte_array[-1] ) % byte_length == 0: result_byte_array.append('''10000000''' ) else: result_byte_array[-1] += "1" + "0" * ( byte_length - len(result_byte_array[-1] ) - 1 ) for elem in result_byte_array[:-1]: opened_file.write(int(_UpperCamelCase , 2 ).to_bytes(1 , byteorder='''big''' ) ) except OSError: print('''File not accessible''' ) sys.exit() def lowercase_ ( _UpperCamelCase ): '''simple docstring''' __lowercase = 0 for letter in data_bits: if letter == "1": break counter += 1 __lowercase = data_bits[counter:] __lowercase = data_bits[counter + 1 :] return data_bits def lowercase_ ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' __lowercase = read_file_binary(_UpperCamelCase ) __lowercase = remove_prefix(_UpperCamelCase ) __lowercase = decompress_data(_UpperCamelCase ) write_file_binary(_UpperCamelCase , _UpperCamelCase ) if __name__ == "__main__": compress(sys.argv[1], sys.argv[2])
639
'''simple docstring''' import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging UpperCamelCase_ = logging.get_logger(__name__) def _lowerCAmelCase ( __magic_name__ : List[str] ) -> Dict: lowercase : List[str] =R'''\w+[.]\d+''' lowercase : List[str] =re.findall(__magic_name__ , __magic_name__ ) for pat in pats: lowercase : Optional[int] =key.replace(__magic_name__ , '''_'''.join(pat.split('''.''' ) ) ) return key def _lowerCAmelCase ( __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : Dict ) -> str: lowercase : Dict =pt_tuple_key[:-1] + ('''scale''',) if ( any('''norm''' in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): lowercase : str =pt_tuple_key[:-1] + ('''scale''',) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: lowercase : str =pt_tuple_key[:-1] + ('''scale''',) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: lowercase : Dict =pt_tuple_key[:-1] + ('''embedding''',) return renamed_pt_tuple_key, pt_tensor # conv layer lowercase : Tuple =pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: lowercase : Tuple =pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer lowercase : str =pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight": lowercase : Optional[Any] =pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight lowercase : Dict =pt_tuple_key[:-1] + ('''weight''',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias lowercase : Union[str, Any] =pt_tuple_key[:-1] + ('''bias''',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any]=42 ) -> List[str]: # Step 1: Convert pytorch tensor to numpy lowercase : Optional[Any] ={k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params lowercase : str =flax_model.init_weights(PRNGKey(__magic_name__ ) ) lowercase : Dict =flatten_dict(__magic_name__ ) lowercase : Dict ={} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowercase : Dict =rename_key(__magic_name__ ) lowercase : Optional[int] =tuple(renamed_pt_key.split('''.''' ) ) # Correctly rename weight parameters lowercase , lowercase : Any =rename_key_and_reshape_tensor(__magic_name__ , __magic_name__ , __magic_name__ ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ''' f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # also add unexpected weight so that warning is thrown lowercase : Tuple =jnp.asarray(__magic_name__ ) return unflatten_dict(__magic_name__ )
92
0
import argparse import requests import torch from PIL import Image from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : Any ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict =SwinConfig(image_size=192 ) if "base" in model_name: SCREAMING_SNAKE_CASE_ : Any =6 SCREAMING_SNAKE_CASE_ : Dict =128 SCREAMING_SNAKE_CASE_ : List[Any] =(2, 2, 18, 2) SCREAMING_SNAKE_CASE_ : Optional[int] =(4, 8, 16, 32) elif "large" in model_name: SCREAMING_SNAKE_CASE_ : int =12 SCREAMING_SNAKE_CASE_ : str =192 SCREAMING_SNAKE_CASE_ : int =(2, 2, 18, 2) SCREAMING_SNAKE_CASE_ : Optional[int] =(6, 12, 24, 48) else: raise ValueError('Model not supported, only supports base and large variants' ) SCREAMING_SNAKE_CASE_ : List[str] =window_size SCREAMING_SNAKE_CASE_ : Dict =embed_dim SCREAMING_SNAKE_CASE_ : Optional[int] =depths SCREAMING_SNAKE_CASE_ : Union[str, Any] =num_heads return config def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : int ) -> Dict: """simple docstring""" if "encoder.mask_token" in name: SCREAMING_SNAKE_CASE_ : Any =name.replace('encoder.mask_token' ,'embeddings.mask_token' ) if "encoder.patch_embed.proj" in name: SCREAMING_SNAKE_CASE_ : Dict =name.replace('encoder.patch_embed.proj' ,'embeddings.patch_embeddings.projection' ) if "encoder.patch_embed.norm" in name: SCREAMING_SNAKE_CASE_ : Dict =name.replace('encoder.patch_embed.norm' ,'embeddings.norm' ) if "attn.proj" in name: SCREAMING_SNAKE_CASE_ : Any =name.replace('attn.proj' ,'attention.output.dense' ) if "attn" in name: SCREAMING_SNAKE_CASE_ : List[str] =name.replace('attn' ,'attention.self' ) if "norm1" in name: SCREAMING_SNAKE_CASE_ : Optional[Any] =name.replace('norm1' ,'layernorm_before' ) if "norm2" in name: SCREAMING_SNAKE_CASE_ : Dict =name.replace('norm2' ,'layernorm_after' ) if "mlp.fc1" in name: SCREAMING_SNAKE_CASE_ : List[Any] =name.replace('mlp.fc1' ,'intermediate.dense' ) if "mlp.fc2" in name: SCREAMING_SNAKE_CASE_ : Optional[Any] =name.replace('mlp.fc2' ,'output.dense' ) if name == "encoder.norm.weight": SCREAMING_SNAKE_CASE_ : List[str] ='''layernorm.weight''' if name == "encoder.norm.bias": SCREAMING_SNAKE_CASE_ : List[str] ='''layernorm.bias''' if "decoder" in name: pass else: SCREAMING_SNAKE_CASE_ : Any ='''swin.''' + name return name def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : Optional[int] ,lowerCAmelCase_ : Any ) -> Optional[int]: """simple docstring""" for key in orig_state_dict.copy().keys(): SCREAMING_SNAKE_CASE_ : str =orig_state_dict.pop(lowerCAmelCase_ ) if "attn_mask" in key: pass elif "qkv" in key: SCREAMING_SNAKE_CASE_ : List[str] =key.split('.' 
) SCREAMING_SNAKE_CASE_ : Any =int(key_split[2] ) SCREAMING_SNAKE_CASE_ : List[str] =int(key_split[4] ) SCREAMING_SNAKE_CASE_ : int =model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: SCREAMING_SNAKE_CASE_ : Dict =val[:dim, :] SCREAMING_SNAKE_CASE_ : Optional[int] =val[ dim : dim * 2, : ] SCREAMING_SNAKE_CASE_ : str =val[-dim:, :] else: SCREAMING_SNAKE_CASE_ : Any =val[ :dim ] SCREAMING_SNAKE_CASE_ : Optional[int] =val[ dim : dim * 2 ] SCREAMING_SNAKE_CASE_ : Any =val[ -dim: ] else: SCREAMING_SNAKE_CASE_ : Any =val return orig_state_dict def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : List[Any] ,lowerCAmelCase_ : Any ,lowerCAmelCase_ : Optional[int] ,lowerCAmelCase_ : int ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple =torch.load(lowerCAmelCase_ ,map_location='cpu' )['''model'''] SCREAMING_SNAKE_CASE_ : Union[str, Any] =get_swin_config(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE_ : int =SwinForMaskedImageModeling(lowerCAmelCase_ ) model.eval() SCREAMING_SNAKE_CASE_ : List[Any] =convert_state_dict(lowerCAmelCase_ ,lowerCAmelCase_ ) model.load_state_dict(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE_ : List[Any] ='''http://images.cocodataset.org/val2017/000000039769.jpg''' SCREAMING_SNAKE_CASE_ : Optional[Any] =ViTImageProcessor(size={'height': 192, 'width': 192} ) SCREAMING_SNAKE_CASE_ : List[str] =Image.open(requests.get(lowerCAmelCase_ ,stream=lowerCAmelCase_ ).raw ) SCREAMING_SNAKE_CASE_ : str =image_processor(images=lowerCAmelCase_ ,return_tensors='pt' ) with torch.no_grad(): SCREAMING_SNAKE_CASE_ : List[str] =model(**lowerCAmelCase_ ).logits print(outputs.keys() ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(lowerCAmelCase_ ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(lowerCAmelCase_ ) if push_to_hub: print(F"""Pushing model and image processor for {model_name} to hub""" ) model.push_to_hub(F"""microsoft/{model_name}""" ) image_processor.push_to_hub(F"""microsoft/{model_name}""" ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='swin-base-simmim-window6-192', type=str, choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'], help='Name of the Swin SimMIM model you\'d like to convert.', ) parser.add_argument( '--checkpoint_path', default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth', type=str, help='Path to the original PyTorch checkpoint (.pth file).', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) __SCREAMING_SNAKE_CASE = parser.parse_args() convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
220
'''simple docstring''' from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. UpperCamelCase_ = 200 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. UpperCamelCase_ = 50 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. UpperCamelCase_ = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 1000)) def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : str ) -> tuple[str, float]: lowercase : int =len([g for position, g in enumerate(__magic_name__ ) if g == main_target[position]] ) return (item, float(__magic_name__ )) def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : str ) -> tuple[str, str]: lowercase : Any =random.randint(0 , len(__magic_name__ ) - 1 ) lowercase : Tuple =parent_a[:random_slice] + parent_a[random_slice:] lowercase : List[str] =parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : list[str] ) -> str: lowercase : Union[str, Any] =list(__magic_name__ ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: lowercase : Dict =random.choice(__magic_name__ ) return "".join(__magic_name__ ) def _lowerCAmelCase ( __magic_name__ : tuple[str, float] , __magic_name__ : list[tuple[str, float]] , __magic_name__ : list[str] , ) -> list[str]: lowercase : Any =[] # Generate more children proportionally to the fitness score. lowercase : Dict =int(parent_a[1] * 100 ) + 1 lowercase : List[str] =10 if child_n >= 10 else child_n for _ in range(__magic_name__ ): lowercase : List[str] =population_score[random.randint(0 , __magic_name__ )][0] lowercase , lowercase : Dict =crossover(parent_a[0] , __magic_name__ ) # Append new string to the population list. pop.append(mutate(__magic_name__ , __magic_name__ ) ) pop.append(mutate(__magic_name__ , __magic_name__ ) ) return pop def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : list[str] , __magic_name__ : bool = True ) -> tuple[int, int, str]: # Verify if N_POPULATION is bigger than N_SELECTED if N_POPULATION < N_SELECTED: lowercase : List[str] =f'''{N_POPULATION} must be bigger than {N_SELECTED}''' raise ValueError(__magic_name__ ) # Verify that the target contains no genes besides the ones inside genes variable. lowercase : Optional[int] =sorted({c for c in target if c not in genes} ) if not_in_genes_list: lowercase : Dict =f'''{not_in_genes_list} is not in genes list, evolution cannot converge''' raise ValueError(__magic_name__ ) # Generate random starting population. lowercase : int =[] for _ in range(__magic_name__ ): population.append(''''''.join([random.choice(__magic_name__ ) for i in range(len(__magic_name__ ) )] ) ) # Just some logs to know what the algorithms is doing. lowercase , lowercase : Optional[int] =0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(__magic_name__ ) # Random population created. Now it's time to evaluate. 
# Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. lowercase : List[str] =[evaluate(__magic_name__ , __magic_name__ ) for item in population] # Check if there is a matching evolution. lowercase : int =sorted(__magic_name__ , key=lambda __magic_name__ : x[1] , reverse=__magic_name__ ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( f'''\nGeneration: {generation}''' f'''\nTotal Population:{total_population}''' f'''\nBest score: {population_score[0][1]}''' f'''\nBest string: {population_score[0][0]}''' ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. lowercase : Any =population[: int(N_POPULATION / 3 )] population.clear() population.extend(__magic_name__ ) # Normalize population score to be between 0 and 1. lowercase : Dict =[ (item, score / len(__magic_name__ )) for item, score in population_score ] # This is selection for i in range(__magic_name__ ): population.extend(select(population_score[int(__magic_name__ )] , __magic_name__ , __magic_name__ ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. if len(__magic_name__ ) > N_POPULATION: break if __name__ == "__main__": UpperCamelCase_ = ( """This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!""" ) UpperCamelCase_ = list( """ ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm""" """nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\""" ) UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = basic(target_str, genes_list) print( f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}''' )
92
0
import json import logging import os import sys from pathlib import Path import finetune_rag from transformers.file_utils import is_apex_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, require_ray, require_torch_gpu, require_torch_multi_gpu, ) logging.basicConfig(level=logging.DEBUG) _SCREAMING_SNAKE_CASE = logging.getLogger() _SCREAMING_SNAKE_CASE = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class SCREAMING_SNAKE_CASE_ ( lowercase__ ): """simple docstring""" def UpperCamelCase__ ( self :str, snake_case :Optional[Any]): """simple docstring""" os.makedirs(UpperCAmelCase__, exist_ok=UpperCAmelCase__) _lowercase ={'''source''': '''What is love ?''', '''target''': '''life'''} _lowercase ={'''train''': 12, '''val''': 2, '''test''': 2} for split in ["train", "test", "val"]: for field in ["source", "target"]: _lowercase ='''\n'''.join([contents[field]] * n_lines[split]) with open(os.path.join(UpperCAmelCase__, f'''{split}.{field}'''), 'w') as f: f.write(UpperCAmelCase__) def UpperCamelCase__ ( self :Union[str, Any], snake_case :int, snake_case :str = "pytorch"): """simple docstring""" _lowercase =self.get_auto_remove_tmp_dir() _lowercase =os.path.join(UpperCAmelCase__, 'output') _lowercase =os.path.join(UpperCAmelCase__, 'data') self._create_dummy_data(data_dir=UpperCAmelCase__) _lowercase =f''' --data_dir {data_dir} \ --output_dir {output_dir} \ --model_name_or_path facebook/rag-sequence-base \ --model_type rag_sequence \ --do_train \ --do_predict \ --n_val -1 \ --val_check_interval 1.0 \ --train_batch_size 2 \ --eval_batch_size 1 \ --max_source_length 25 \ --max_target_length 25 \ --val_max_target_length 25 \ --test_max_target_length 25 \ --label_smoothing 0.1 \ --dropout 0.1 \ --attention_dropout 0.1 \ --weight_decay 0.001 \ --adam_epsilon 1e-08 \ --max_grad_norm 0.1 \ --lr_scheduler polynomial \ --learning_rate 3e-04 \ --num_train_epochs 1 \ --warmup_steps 4 \ --gradient_accumulation_steps 1 \ --distributed-port 8787 \ --use_dummy_dataset 1 \ --distributed_retriever {distributed_retriever} \ '''.split() if gpus > 0: testargs.append(f'''--gpus={gpus}''') if is_apex_available(): testargs.append('--fp16') else: testargs.append('--gpus=0') testargs.append('--distributed_backend=ddp_cpu') testargs.append('--num_processes=2') _lowercase =[sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs execute_subprocess_async(UpperCAmelCase__, env=self.get_env()) _lowercase =os.path.join(UpperCAmelCase__, 'metrics.json') with open(UpperCAmelCase__) as f: _lowercase =json.load(UpperCAmelCase__) return result @require_torch_gpu def UpperCamelCase__ ( self :Optional[int]): """simple docstring""" _lowercase =self._run_finetune(gpus=1) self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2) @require_torch_multi_gpu def UpperCamelCase__ ( self :List[Any]): """simple docstring""" _lowercase =self._run_finetune(gpus=2) self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2) @require_torch_gpu @require_ray def UpperCamelCase__ ( self :Dict): """simple docstring""" _lowercase =self._run_finetune(gpus=1, distributed_retriever='ray') self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2) @require_torch_multi_gpu @require_ray def UpperCamelCase__ ( self :int): """simple docstring""" _lowercase =self._run_finetune(gpus=1, distributed_retriever='ray') self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)
181
'''simple docstring''' import datasets UpperCamelCase_ = """\ @InProceedings{conneau2018xnli, author = \"Conneau, Alexis and Rinott, Ruty and Lample, Guillaume and Williams, Adina and Bowman, Samuel R. and Schwenk, Holger and Stoyanov, Veselin\", title = \"XNLI: Evaluating Cross-lingual Sentence Representations\", booktitle = \"Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing\", year = \"2018\", publisher = \"Association for Computational Linguistics\", location = \"Brussels, Belgium\", } """ UpperCamelCase_ = """\ XNLI is a subset of a few thousand examples from MNLI which has been translated into a 14 different languages (some low-ish resource). As with MNLI, the goal is to predict textual entailment (does sentence A imply/contradict/neither sentence B) and is a classification task (given two sentences, predict one of three labels). """ UpperCamelCase_ = """ Computes XNLI score which is just simple accuracy. Args: predictions: Predicted labels. references: Ground truth labels. Returns: 'accuracy': accuracy Examples: >>> predictions = [0, 1] >>> references = [0, 1] >>> xnli_metric = datasets.load_metric(\"xnli\") >>> results = xnli_metric.compute(predictions=predictions, references=references) >>> print(results) {'accuracy': 1.0} """ def _lowerCAmelCase ( __magic_name__ : Dict , __magic_name__ : Union[str, Any] ) -> Union[str, Any]: return (preds == labels).mean() @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __SCREAMING_SNAKE_CASE ( datasets.Metric ): def lowerCamelCase_ ( self : str ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ), '''references''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ), } ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , ) def lowerCamelCase_ ( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] ): '''simple docstring''' return {"accuracy": simple_accuracy(UpperCAmelCase__ , UpperCAmelCase__ )}
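# Minimal illustration of the accuracy computed by the metric above: because the
# metric requests format="numpy", predictions and references arrive as arrays, so
# (preds == labels).mean() is an elementwise comparison followed by a mean of booleans.
import numpy as np

preds = np.array([0, 1, 2, 1])
labels = np.array([0, 1, 1, 1])
print((preds == labels).mean())  # 0.75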
92
0
"""simple docstring""" from __future__ import annotations def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[list[int]] ): # preprocessing the first row for i in range(1 , len(matrix[0] ) ): matrix[0][i] += matrix[0][i - 1] # preprocessing the first column for i in range(1 , len(_UpperCAmelCase ) ): matrix[i][0] += matrix[i - 1][0] # updating the path cost for current position for i in range(1 , len(_UpperCAmelCase ) ): for j in range(1 , len(matrix[0] ) ): matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] ) return matrix[-1][-1] if __name__ == "__main__": import doctest doctest.testmod()
4
'''simple docstring''' from __future__ import annotations import random import unittest from transformers import TransfoXLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLModel, ) class __SCREAMING_SNAKE_CASE : def __init__( self : List[str] , UpperCAmelCase__ : Dict , ): '''simple docstring''' lowercase : Any =parent lowercase : Optional[int] =13 lowercase : Union[str, Any] =7 lowercase : str =30 lowercase : Optional[int] =self.seq_length + self.mem_len lowercase : Dict =15 lowercase : List[str] =True lowercase : Optional[int] =True lowercase : Tuple =99 lowercase : str =[10, 50, 80] lowercase : List[Any] =32 lowercase : Optional[int] =32 lowercase : int =4 lowercase : Any =8 lowercase : List[Any] =128 lowercase : List[str] =2 lowercase : Tuple =2 lowercase : int =None lowercase : Optional[int] =1 lowercase : int =0 lowercase : List[str] =3 lowercase : str =self.vocab_size - 1 lowercase : Tuple =0.01 def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : str =None if self.use_labels: lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : Union[str, Any] =TransfoXLConfig( vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , ) return (config, input_ids_a, input_ids_a, lm_labels) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' random.seed(self.seed ) tf.random.set_seed(self.seed ) def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] ): '''simple docstring''' lowercase : Tuple =TFTransfoXLModel(UpperCAmelCase__ ) lowercase , lowercase : Optional[Any] =model(UpperCAmelCase__ ).to_tuple() lowercase : List[str] ={'''input_ids''': input_ids_a, '''mems''': mems_a} lowercase , lowercase : Any =model(UpperCAmelCase__ ).to_tuple() self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def lowerCamelCase_ ( self : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int ): '''simple docstring''' lowercase : int =TFTransfoXLLMHeadModel(UpperCAmelCase__ ) lowercase , lowercase : Tuple =model(UpperCAmelCase__ 
).to_tuple() lowercase : Optional[Any] ={'''input_ids''': input_ids_a, '''labels''': lm_labels} lowercase , lowercase : Optional[int] =model(UpperCAmelCase__ ).to_tuple() lowercase , lowercase : List[str] =model([input_ids_a, mems_a] ).to_tuple() lowercase : int ={'''input_ids''': input_ids_a, '''mems''': mems_a, '''labels''': lm_labels} lowercase , lowercase : str =model(UpperCAmelCase__ ).to_tuple() self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def lowerCamelCase_ ( self : str , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] ): '''simple docstring''' lowercase : Optional[int] =TFTransfoXLForSequenceClassification(UpperCAmelCase__ ) lowercase : Union[str, Any] =model(UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' lowercase : List[Any] =self.prepare_config_and_inputs() ((lowercase) , (lowercase) , (lowercase) , (lowercase)) : Optional[Any] =config_and_inputs lowercase : Union[str, Any] ={'''input_ids''': input_ids_a} return config, inputs_dict @require_tf class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ): lowerCamelCase_ = ( (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () ) lowerCamelCase_ = () if is_tf_available() else () lowerCamelCase_ = ( { 'feature-extraction': TFTransfoXLModel, 'text-classification': TFTransfoXLForSequenceClassification, 'text-generation': TFTransfoXLLMHeadModel, 'zero-shot': TFTransfoXLForSequenceClassification, } if is_tf_available() else {} ) # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int ): '''simple docstring''' if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. 
return True return False def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : Any =TFTransfoXLModelTester(self ) lowercase : Optional[int] =ConfigTester(self , config_class=UpperCAmelCase__ , d_embed=37 ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' self.model_tester.set_seed() lowercase : List[str] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_model(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' self.model_tester.set_seed() lowercase : Any =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' lowercase , lowercase : Tuple =self.model_tester.prepare_config_and_inputs_for_common() lowercase : int =[TFTransfoXLForSequenceClassification] for model_class in self.all_model_classes: lowercase : str =model_class(UpperCAmelCase__ ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class in list_other_models_with_output_ebd: lowercase : Union[str, Any] =model.get_output_embeddings() assert isinstance(UpperCAmelCase__ , tf.keras.layers.Layer ) lowercase : Any =model.get_bias() assert name is None else: lowercase : Optional[int] =model.get_output_embeddings() assert x is None lowercase : Optional[int] =model.get_bias() assert name is None def lowerCamelCase_ ( self : Any ): '''simple docstring''' # TODO JP: Make TransfoXL XLA compliant pass @slow def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase : int =TFTransfoXLModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) @unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' ) def lowerCamelCase_ ( self : int ): '''simple docstring''' pass @require_tf class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @unittest.skip('''Skip test until #12651 is resolved.''' ) @slow def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowercase : Optional[Any] =TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' ) # fmt: off lowercase : Tuple =tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . 
# Rasputin has a vision and denounces one of the men as a horse thief . Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . <eod> </s> <eos> # fmt: off lowercase : Optional[int] =[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( # except for Alexei and Maria ) are discovered. The voice of young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. # 1883 Western Siberia, a young Grigori Rasputin is asked by his father # and a group of men to perform magic. Rasputin has a vision and # denounces one of the men as a horse thief. Although his father initially # slaps him for making such an accusation, Rasputin watches as the man # is chased outside and beaten. Twenty years later, Rasputin sees a vision # of the Virgin Mary, prompting him to become a priest. # Rasputin quickly becomes famous, with people, even a bishop, begging for # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar # Nicholas II and his family were discovered. The voice of <unk> young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos> lowercase : int =model.generate(UpperCAmelCase__ , max_length=200 , do_sample=UpperCAmelCase__ ) self.assertListEqual(output_ids[0].numpy().tolist() , UpperCAmelCase__ )
92
0
import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class _lowerCAmelCase : """simple docstring""" def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str=13 , SCREAMING_SNAKE_CASE__ : Tuple=7 , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=99 , SCREAMING_SNAKE_CASE__ : Any=16 , SCREAMING_SNAKE_CASE__ : Optional[Any]=36 , SCREAMING_SNAKE_CASE__ : str=6 , SCREAMING_SNAKE_CASE__ : Any=6 , SCREAMING_SNAKE_CASE__ : Optional[int]=6 , SCREAMING_SNAKE_CASE__ : Any=37 , SCREAMING_SNAKE_CASE__ : List[str]="gelu" , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=5_12 , SCREAMING_SNAKE_CASE__ : List[str]=16 , SCREAMING_SNAKE_CASE__ : Optional[int]=2 , SCREAMING_SNAKE_CASE__ : Any=0.02 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE__ : List[Any]=4 , SCREAMING_SNAKE_CASE__ : Tuple=None , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = seq_length UpperCamelCase = is_training UpperCamelCase = use_input_mask UpperCamelCase = use_token_type_ids UpperCamelCase = use_labels UpperCamelCase = vocab_size UpperCamelCase = embedding_size UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_hidden_groups UpperCamelCase = num_attention_heads UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = max_position_embeddings UpperCamelCase = type_vocab_size UpperCamelCase = type_sequence_label_size UpperCamelCase = initializer_range UpperCamelCase = num_labels UpperCamelCase = num_choices UpperCamelCase = scope def __lowerCAmelCase ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase = None if self.use_input_mask: UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase = None if self.use_token_type_ids: UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase = None UpperCamelCase = None UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices ) UpperCamelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCAmelCase ( self : Union[str, Any] ): """simple docstring""" return AlbertConfig( 
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def __lowerCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str ): """simple docstring""" UpperCamelCase = AlbertModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() UpperCamelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) UpperCamelCase = model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) UpperCamelCase = model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple ): """simple docstring""" UpperCamelCase = AlbertForPreTraining(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() UpperCamelCase = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , sentence_order_label=UpperCAmelCase__ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int ): """simple docstring""" UpperCamelCase = AlbertForMaskedLM(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() UpperCamelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] ): """simple docstring""" UpperCamelCase = AlbertForQuestionAnswering(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() UpperCamelCase = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def 
__lowerCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ): """simple docstring""" UpperCamelCase = self.num_labels UpperCamelCase = AlbertForSequenceClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() UpperCamelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict ): """simple docstring""" UpperCamelCase = self.num_labels UpperCamelCase = AlbertForTokenClassification(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() UpperCamelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] ): """simple docstring""" UpperCamelCase = self.num_choices UpperCamelCase = AlbertForMultipleChoice(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() UpperCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowerCAmelCase ( self : int ): """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() ( UpperCamelCase ) = config_and_inputs UpperCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class _lowerCAmelCase ( lowercase__ , lowercase__ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple =( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE_ : Any =( { "feature-extraction": AlbertModel, "fill-mask": AlbertForMaskedLM, "question-answering": AlbertForQuestionAnswering, "text-classification": AlbertForSequenceClassification, "token-classification": AlbertForTokenClassification, "zero-shot": AlbertForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ : int =True def __lowerCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int=False ): """simple docstring""" UpperCamelCase = 
super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ ) if return_labels: if model_class in get_values(UpperCAmelCase__ ): UpperCamelCase = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCAmelCase__ ) UpperCamelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ ) return inputs_dict def __lowerCAmelCase ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase = AlbertModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37 ) def __lowerCAmelCase ( self : List[str] ): """simple docstring""" self.config_tester.run_common_tests() def __lowerCAmelCase ( self : Tuple ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def __lowerCAmelCase ( self : List[str] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase__ ) def __lowerCAmelCase ( self : Dict ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ ) def __lowerCAmelCase ( self : str ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase__ ) def __lowerCAmelCase ( self : int ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__ ) def __lowerCAmelCase ( self : Optional[int] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__ ) def __lowerCAmelCase ( self : List[Any] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCamelCase = type self.model_tester.create_and_check_model(*UpperCAmelCase__ ) @slow def __lowerCAmelCase ( self : Optional[Any] ): """simple docstring""" for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase = AlbertModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) @require_torch class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def __lowerCAmelCase ( self : Tuple ): """simple docstring""" UpperCamelCase = AlbertModel.from_pretrained('albert-base-v2' ) UpperCamelCase = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] ) UpperCamelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): UpperCamelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )[0] UpperCamelCase = torch.Size((1, 11, 7_68) ) self.assertEqual(output.shape , UpperCAmelCase__ ) UpperCamelCase = torch.tensor( [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCAmelCase__ , atol=1e-4 ) )
282
'''simple docstring''' import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class __SCREAMING_SNAKE_CASE : def __init__( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str=13 , UpperCAmelCase__ : Tuple=7 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Optional[Any]=99 , UpperCAmelCase__ : Any=16 , UpperCAmelCase__ : Optional[Any]=36 , UpperCAmelCase__ : str=6 , UpperCAmelCase__ : Any=6 , UpperCAmelCase__ : Optional[int]=6 , UpperCAmelCase__ : Any=37 , UpperCAmelCase__ : List[str]="gelu" , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : List[str]=512 , UpperCAmelCase__ : List[str]=16 , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : Union[str, Any]=3 , UpperCAmelCase__ : List[Any]=4 , UpperCAmelCase__ : Tuple=None , ): '''simple docstring''' lowercase : str =parent lowercase : int =batch_size lowercase : Any =seq_length lowercase : int =is_training lowercase : str =use_input_mask lowercase : int =use_token_type_ids lowercase : Dict =use_labels lowercase : int =vocab_size lowercase : str =embedding_size lowercase : Union[str, Any] =hidden_size lowercase : Tuple =num_hidden_layers lowercase : Any =num_hidden_groups lowercase : Union[str, Any] =num_attention_heads lowercase : Any =intermediate_size lowercase : Tuple =hidden_act lowercase : Optional[int] =hidden_dropout_prob lowercase : Union[str, Any] =attention_probs_dropout_prob lowercase : List[Any] =max_position_embeddings lowercase : int =type_vocab_size lowercase : int =type_sequence_label_size lowercase : Any =initializer_range lowercase : List[Any] =num_labels lowercase : int =num_choices lowercase : Optional[int] =scope def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : int =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : Optional[int] =None if self.use_input_mask: lowercase : List[str] =random_attention_mask([self.batch_size, self.seq_length] ) lowercase : Dict =None if self.use_token_type_ids: lowercase : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase : Tuple =None lowercase : Any =None lowercase : Dict =None if self.use_labels: lowercase : int =ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase : str =ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase : Optional[Any] =ids_tensor([self.batch_size] , self.num_choices ) lowercase : Any =self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' return AlbertConfig( vocab_size=self.vocab_size , 
hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str ): '''simple docstring''' lowercase : int =AlbertModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : int =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) lowercase : Dict =model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) lowercase : int =model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple ): '''simple docstring''' lowercase : Tuple =AlbertForPreTraining(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : int =model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , sentence_order_label=UpperCAmelCase__ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int ): '''simple docstring''' lowercase : Tuple =AlbertForMaskedLM(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : str =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] ): '''simple docstring''' lowercase : List[str] =AlbertForQuestionAnswering(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : List[str] =model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : int , 
UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] ): '''simple docstring''' lowercase : Optional[Any] =self.num_labels lowercase : Any =AlbertForSequenceClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : Dict =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict ): '''simple docstring''' lowercase : List[Any] =self.num_labels lowercase : str =AlbertForTokenClassification(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : int =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] ): '''simple docstring''' lowercase : Optional[int] =self.num_choices lowercase : List[Any] =AlbertForMultipleChoice(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : List[Any] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : int =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : int =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : List[str] =model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : Union[str, Any] =self.prepare_config_and_inputs() ( ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ) : Dict =config_and_inputs lowercase : Optional[Any] ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ): lowerCamelCase_ = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) lowerCamelCase_ = ( { 'feature-extraction': AlbertModel, 'fill-mask': AlbertForMaskedLM, 'question-answering': AlbertForQuestionAnswering, 'text-classification': AlbertForSequenceClassification, 'token-classification': AlbertForTokenClassification, 'zero-shot': AlbertForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase_ = True def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int=False ): '''simple docstring''' lowercase : Optional[int] =super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ ) if return_labels: if model_class in get_values(UpperCAmelCase__ ): lowercase : Any =torch.zeros( 
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCAmelCase__ ) lowercase : Any =torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ ) return inputs_dict def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : Tuple =AlbertModelTester(self ) lowercase : Optional[Any] =ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37 ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowercase : Optional[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' lowercase : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' lowercase : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : str ): '''simple docstring''' lowercase : List[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : str =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' lowercase : int =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : Optional[int] =self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowercase : Tuple =type self.model_tester.create_and_check_model(*UpperCAmelCase__ ) @slow def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase : str =AlbertModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) @require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @slow def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowercase : int =AlbertModel.from_pretrained('''albert-base-v2''' ) lowercase : Optional[int] =torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) lowercase : Any =torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): lowercase : Any =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )[0] lowercase : int =torch.Size((1, 11, 768) ) self.assertEqual(output.shape , UpperCAmelCase__ ) lowercase : Union[str, Any] =torch.tensor( [[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCAmelCase__ , atol=1E-4 ) )
92
0
"""simple docstring""" from __future__ import annotations from statistics import mean def _A ( __lowercase , __lowercase , __lowercase ): """simple docstring""" lowerCamelCase__ = [0] * no_of_processes lowerCamelCase__ = [0] * no_of_processes # Initialize remaining_time to waiting_time. for i in range(__lowercase ): lowerCamelCase__ = burst_time[i] lowerCamelCase__ = [] lowerCamelCase__ = 0 lowerCamelCase__ = 0 # When processes are not completed, # A process whose arrival time has passed \ # and has remaining execution time is put into the ready_process. # The shortest process in the ready_process, target_process is executed. while completed != no_of_processes: lowerCamelCase__ = [] lowerCamelCase__ = -1 for i in range(__lowercase ): if (arrival_time[i] <= total_time) and (remaining_time[i] > 0): ready_process.append(__lowercase ) if len(__lowercase ) > 0: lowerCamelCase__ = ready_process[0] for i in ready_process: if remaining_time[i] < remaining_time[target_process]: lowerCamelCase__ = i total_time += burst_time[target_process] completed += 1 lowerCamelCase__ = 0 lowerCamelCase__ = ( total_time - arrival_time[target_process] - burst_time[target_process] ) else: total_time += 1 return waiting_time def _A ( __lowercase , __lowercase , __lowercase ): """simple docstring""" lowerCamelCase__ = [0] * no_of_processes for i in range(__lowercase ): lowerCamelCase__ = burst_time[i] + waiting_time[i] return turn_around_time if __name__ == "__main__": print("""[TEST CASE 01]""") __magic_name__ = 4 __magic_name__ = [2, 5, 3, 7] __magic_name__ = [0, 0, 0, 0] __magic_name__ = calculate_waitingtime(arrival_time, burst_time, no_of_processes) __magic_name__ = calculate_turnaroundtime( burst_time, no_of_processes, waiting_time ) # Printing the Result print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""") for i, process_id in enumerate(list(range(1, 5))): print( F'{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t' F'{waiting_time[i]}\t\t\t\t{turn_around_time[i]}' ) print(F'\nAverage waiting time = {mean(waiting_time):.5f}') print(F'Average turnaround time = {mean(turn_around_time):.5f}')
129
'''simple docstring''' import numpy as np from cva import destroyAllWindows, imread, imshow, waitKey class __SCREAMING_SNAKE_CASE : def __init__( self : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int ): '''simple docstring''' if dst_width < 0 or dst_height < 0: raise ValueError('''Destination width/height should be > 0''' ) lowercase : Union[str, Any] =img lowercase : Union[str, Any] =img.shape[1] lowercase : str =img.shape[0] lowercase : Union[str, Any] =dst_width lowercase : str =dst_height lowercase : str =self.src_w / self.dst_w lowercase : Optional[Any] =self.src_h / self.dst_h lowercase : int =( np.ones((self.dst_h, self.dst_w, 3) , np.uinta ) * 255 ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' for i in range(self.dst_h ): for j in range(self.dst_w ): lowercase : List[Any] =self.img[self.get_y(UpperCAmelCase__ )][self.get_x(UpperCAmelCase__ )] def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : int ): '''simple docstring''' return int(self.ratio_x * x ) def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : int ): '''simple docstring''' return int(self.ratio_y * y ) if __name__ == "__main__": UpperCamelCase_ , UpperCamelCase_ = 800, 600 UpperCamelCase_ = imread("""image_data/lena.jpg""", 1) UpperCamelCase_ = NearestNeighbour(im, dst_w, dst_h) n.process() imshow( f'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output ) waitKey(0) destroyAllWindows()
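# The resizer above maps every destination pixel (i, j) back to a source pixel through
# the height/width ratios and copies it unchanged (nearest-neighbour interpolation).
# The same index mapping in a few lines of numpy, without the OpenCV file I/O
# (illustrative sketch only):
import numpy as np


def nearest_neighbour_resize(img: np.ndarray, dst_h: int, dst_w: int) -> np.ndarray:
    src_h, src_w = img.shape[:2]
    ys = (np.arange(dst_h) * (src_h / dst_h)).astype(int)
    xs = (np.arange(dst_w) * (src_w / dst_w)).astype(int)
    return img[ys[:, None], xs]  # fancy indexing builds the (dst_h, dst_w, ...) output


checker = np.array([[0, 255], [255, 0]], dtype=np.uint8)
print(nearest_neighbour_resize(checker, 4, 4))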
92
0
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class _lowerCAmelCase ( unittest.TestCase ): def __init__( self , _UpperCamelCase , _UpperCamelCase=7 , _UpperCamelCase=3 , _UpperCamelCase=18 , _UpperCamelCase=30 , _UpperCamelCase=400 , _UpperCamelCase=True , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase=[0.5, 0.5, 0.5] , _UpperCamelCase=[0.5, 0.5, 0.5] , ) -> Optional[int]: lowerCAmelCase_ = size if size is not None else {'''shortest_edge''': 18} lowerCAmelCase_ = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} lowerCAmelCase_ = parent lowerCAmelCase_ = batch_size lowerCAmelCase_ = num_channels lowerCAmelCase_ = image_size lowerCAmelCase_ = min_resolution lowerCAmelCase_ = max_resolution lowerCAmelCase_ = do_resize lowerCAmelCase_ = size lowerCAmelCase_ = do_center_crop lowerCAmelCase_ = crop_size lowerCAmelCase_ = do_normalize lowerCAmelCase_ = image_mean lowerCAmelCase_ = image_std def __a ( self ) -> Dict: return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "do_center_crop": self.do_center_crop, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class _lowerCAmelCase ( lowercase__ , unittest.TestCase ): _lowercase =LevitImageProcessor if is_vision_available() else None def __a ( self ) -> Optional[int]: lowerCAmelCase_ = LevitImageProcessingTester(self ) @property def __a ( self ) -> List[Any]: return self.image_processor_tester.prepare_image_processor_dict() def __a ( self ) -> str: lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCAmelCase__ , "image_mean" ) ) self.assertTrue(hasattr(UpperCAmelCase__ , "image_std" ) ) self.assertTrue(hasattr(UpperCAmelCase__ , "do_normalize" ) ) self.assertTrue(hasattr(UpperCAmelCase__ , "do_resize" ) ) self.assertTrue(hasattr(UpperCAmelCase__ , "do_center_crop" ) ) self.assertTrue(hasattr(UpperCAmelCase__ , "size" ) ) def __a ( self ) -> Optional[int]: lowerCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 18} ) self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} ) lowerCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"shortest_edge": 42} ) self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} ) def __a ( self ) -> Dict: pass def __a ( self ) -> int: lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , Image.Image ) # Test not batched input lowerCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , 
) # Test batched lowerCAmelCase_ = image_processing(UpperCAmelCase__ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def __a ( self ) -> List[Any]: lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , np.ndarray ) # Test not batched input lowerCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched lowerCAmelCase_ = image_processing(UpperCAmelCase__ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def __a ( self ) -> Dict: lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , torch.Tensor ) # Test not batched input lowerCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched lowerCAmelCase_ = image_processing(UpperCAmelCase__ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , )
290
'''simple docstring'''
from __future__ import annotations


def resistor_parallel(resistors: list[float]) -> float:
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f'''Resistor at index {index} has a negative or zero value!'''
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f'''Resistor at index {index} has a negative value!'''
            raise ValueError(msg)
        index += 1
    return sum_r


if __name__ == "__main__":
    import doctest

    doctest.testmod()
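# Quick sanity check of the parallel/series helpers above: for three resistors of
# 2, 4 and 4 ohms, the parallel equivalent is 1 / (1/2 + 1/4 + 1/4) = 1 ohm and the
# series equivalent is 2 + 4 + 4 = 10 ohms.
print(resistor_parallel([2, 4, 4]))  # 1.0
print(resistor_series([2, 4, 4]))    # 10.0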
92
0
'''simple docstring'''


def a_(number: int) -> str:
    if number > 0:
        raise ValueError('input must be a negative integer')
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        "1" + "0" * (binary_number_length - len(twos_complement_number)) + twos_complement_number
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
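# Worked example for the two's-complement helper above: for -5 it returns "0b1011",
# the 4-bit two's complement of -5 (1011 = -8 + 2 + 1 = -5).
print(a_(-5))   # 0b1011
print(a_(-1))   # 0b11
print(a_(-10))  # 0b10110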
286
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""", """self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""", """self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""", """self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""", """self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""", """self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""", """self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""", """self_attn.rotary_emb""": """encoder.embed_positions""", """self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""", """conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""", """conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""", """conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""", """conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""", """conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""", """ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""", """ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""", """ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""", """ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""", """ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""", """ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } UpperCamelCase_ = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", ] def _lowerCAmelCase ( __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple ) -> str: for attribute in key.split('''.''' ): lowercase : Tuple =getattr(__magic_name__ , __magic_name__ ) if weight_type is not None: lowercase : Optional[int] =getattr(__magic_name__ , __magic_name__ ).shape else: lowercase : List[Any] =hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'''Shape of hf {key + "." 
+ weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": lowercase : Any =value elif weight_type == "weight_g": lowercase : List[Any] =value elif weight_type == "weight_v": lowercase : Union[str, Any] =value elif weight_type == "bias": lowercase : Tuple =value elif weight_type == "running_mean": lowercase : Union[str, Any] =value elif weight_type == "running_var": lowercase : str =value elif weight_type == "num_batches_tracked": lowercase : Tuple =value elif weight_type == "inv_freq": lowercase : Optional[Any] =value else: lowercase : Tuple =value logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def _lowerCAmelCase ( __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : str ) -> Union[str, Any]: lowercase : Optional[int] =[] lowercase : Tuple =fairseq_model.state_dict() lowercase : List[Any] =hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): lowercase : Tuple =False if "conv_layers" in name: load_conv_layer( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , hf_model.config.feat_extract_norm == '''group''' , ) lowercase : List[Any] =True else: for key, mapped_key in MAPPING.items(): lowercase : Optional[int] ='''wav2vec2_conformer.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: lowercase : Union[str, Any] =True if "*" in mapped_key: lowercase : Optional[int] =name.split(__magic_name__ )[0].split('''.''' )[-2] lowercase : List[str] =mapped_key.replace('''*''' , __magic_name__ ) if "pos_bias_u" in name: lowercase : Optional[Any] =None elif "pos_bias_v" in name: lowercase : Union[str, Any] =None elif "weight_g" in name: lowercase : Any ='''weight_g''' elif "weight_v" in name: lowercase : Tuple ='''weight_v''' elif "bias" in name: lowercase : Optional[int] ='''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj lowercase : Optional[int] ='''weight''' elif "running_mean" in name: lowercase : Union[str, Any] ='''running_mean''' elif "inv_freq" in name: lowercase : Any ='''inv_freq''' elif "running_var" in name: lowercase : Tuple ='''running_var''' elif "num_batches_tracked" in name: lowercase : Dict ='''num_batches_tracked''' else: lowercase : str =None set_recursively(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) continue if not is_used: unused_weights.append(__magic_name__ ) logger.warning(f'''Unused weights: {unused_weights}''' ) def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Tuple ) -> int: lowercase : Optional[Any] =full_name.split('''conv_layers.''' )[-1] lowercase : Any =name.split('''.''' ) lowercase : List[str] =int(items[0] ) lowercase : Union[str, Any] =int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) lowercase : Union[str, Any] =value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( 
f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) lowercase : Optional[Any] =value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) lowercase : Optional[int] =value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) lowercase : str =value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(__magic_name__ ) @torch.no_grad() def _lowerCAmelCase ( __magic_name__ : List[str] , __magic_name__ : Optional[Any] , __magic_name__ : Tuple=None , __magic_name__ : Optional[Any]=None , __magic_name__ : Union[str, Any]=True ) -> Union[str, Any]: if config_path is not None: lowercase : Optional[Any] =WavaVecaConformerConfig.from_pretrained(__magic_name__ , hidden_act='''swish''' ) else: lowercase : Optional[int] =WavaVecaConformerConfig() if "rope" in checkpoint_path: lowercase : Dict ='''rotary''' if is_finetuned: if dict_path: lowercase : Optional[Any] =Dictionary.load(__magic_name__ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq lowercase : str =target_dict.pad_index lowercase : Union[str, Any] =target_dict.bos_index lowercase : Any =target_dict.eos_index lowercase : Tuple =len(target_dict.symbols ) lowercase : str =os.path.join(__magic_name__ , '''vocab.json''' ) if not os.path.isdir(__magic_name__ ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__magic_name__ ) ) return os.makedirs(__magic_name__ , exist_ok=__magic_name__ ) lowercase : Dict =target_dict.indices # fairseq has the <pad> and <s> switched lowercase : str =0 lowercase : List[Any] =1 with open(__magic_name__ , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(__magic_name__ , __magic_name__ ) lowercase : List[str] =WavaVecaCTCTokenizer( __magic_name__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__magic_name__ , ) lowercase : Optional[int] =True if config.feat_extract_norm == '''layer''' else False lowercase : str =WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__magic_name__ , return_attention_mask=__magic_name__ , ) lowercase : Tuple =WavaVecaProcessor(feature_extractor=__magic_name__ , tokenizer=__magic_name__ ) processor.save_pretrained(__magic_name__ ) lowercase : str =WavaVecaConformerForCTC(__magic_name__ ) else: lowercase : Tuple =WavaVecaConformerForPreTraining(__magic_name__ ) if is_finetuned: lowercase , lowercase , lowercase : Optional[int] =fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} 
) else: lowercase : Dict =argparse.Namespace(task='''audio_pretraining''' ) lowercase : Optional[int] =fairseq.tasks.setup_task(__magic_name__ ) lowercase , lowercase , lowercase : str =fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__magic_name__ ) lowercase : List[Any] =model[0].eval() recursively_load_weights(__magic_name__ , __magic_name__ , not is_finetuned ) hf_wavavec.save_pretrained(__magic_name__ ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) UpperCamelCase_ = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
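A hedged follow-up sketch, not part of the original script: loading the dump folder the fine-tuned branch writes out. The directory name is an assumed placeholder, and the standard transformers class spellings (Wav2Vec2ConformerForCTC, Wav2Vec2Processor) are used here, which differ from the identifiers in this sample.

from transformers import Wav2Vec2ConformerForCTC, Wav2Vec2Processor

output_dir = "./wav2vec2-conformer-converted"  # assumed --pytorch_dump_folder_path value
model = Wav2Vec2ConformerForCTC.from_pretrained(output_dir)  # model saved by save_pretrained above
processor = Wav2Vec2Processor.from_pretrained(output_dir)    # processor saved in the fine-tuned branch
print(model.config.hidden_size)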
92
0
from __future__ import annotations from collections.abc import Callable from typing import Generic, TypeVar snake_case : Tuple = TypeVar('T') snake_case : List[Any] = TypeVar('U') class __lowercase ( Generic[T, U] ): """simple docstring""" def __init__( self , A_ , A_ )-> Optional[int]: _SCREAMING_SNAKE_CASE = key _SCREAMING_SNAKE_CASE = val _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = None def __repr__( self )-> Optional[int]: return ( F'''Node: key: {self.key}, val: {self.val}, ''' F'''has next: {bool(self.next )}, has prev: {bool(self.prev )}''' ) class __lowercase ( Generic[T, U] ): """simple docstring""" def __init__( self )-> int: _SCREAMING_SNAKE_CASE = DoubleLinkedListNode(UpperCAmelCase__ , UpperCAmelCase__ ) _SCREAMING_SNAKE_CASE = DoubleLinkedListNode(UpperCAmelCase__ , UpperCAmelCase__ ) _SCREAMING_SNAKE_CASE = self.rear, self.head def __repr__( self )-> Tuple: _SCREAMING_SNAKE_CASE = ['''DoubleLinkedList'''] _SCREAMING_SNAKE_CASE = self.head while node.next is not None: rep.append(str(UpperCAmelCase__ ) ) _SCREAMING_SNAKE_CASE = node.next rep.append(str(self.rear ) ) return ",\n ".join(UpperCAmelCase__ ) def __magic_name__ ( self , A_ )-> Union[str, Any]: _SCREAMING_SNAKE_CASE = self.rear.prev # All nodes other than self.head are guaranteed to have non-None previous assert previous is not None _SCREAMING_SNAKE_CASE = node _SCREAMING_SNAKE_CASE = previous _SCREAMING_SNAKE_CASE = node _SCREAMING_SNAKE_CASE = self.rear def __magic_name__ ( self , A_ )-> int: if node.prev is None or node.next is None: return None _SCREAMING_SNAKE_CASE = node.next _SCREAMING_SNAKE_CASE = node.prev _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = None return node class __lowercase ( Generic[T, U] ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = {} def __init__( self , A_ )-> Any: _SCREAMING_SNAKE_CASE = DoubleLinkedList() _SCREAMING_SNAKE_CASE = capacity _SCREAMING_SNAKE_CASE = 0 _SCREAMING_SNAKE_CASE = 0 _SCREAMING_SNAKE_CASE = 0 _SCREAMING_SNAKE_CASE = {} def __repr__( self )-> Optional[int]: return ( F'''CacheInfo(hits={self.hits}, misses={self.miss}, ''' F'''capacity={self.capacity}, current size={self.num_keys})''' ) def __contains__( self , A_ )-> List[str]: return key in self.cache def __magic_name__ ( self , A_ )-> Optional[Any]: # Note: pythonic interface would throw KeyError rather than return None if key in self.cache: self.hits += 1 _SCREAMING_SNAKE_CASE = self.cache[key] _SCREAMING_SNAKE_CASE = self.list.remove(self.cache[key] ) assert node == value_node # node is guaranteed not None because it is in self.cache assert node is not None self.list.add(UpperCAmelCase__ ) return node.val self.miss += 1 return None def __magic_name__ ( self , A_ , A_ )-> Any: if key not in self.cache: if self.num_keys >= self.capacity: # delete first node (oldest) when over capacity _SCREAMING_SNAKE_CASE = self.list.head.next # guaranteed to have a non-None first node when num_keys > 0 # explain to type checker via assertions assert first_node is not None assert first_node.key is not None assert ( self.list.remove(UpperCAmelCase__ ) is not None ) # node guaranteed to be in list assert node.key is not None del self.cache[first_node.key] self.num_keys -= 1 _SCREAMING_SNAKE_CASE = DoubleLinkedListNode(UpperCAmelCase__ , UpperCAmelCase__ ) self.list.add(self.cache[key] ) self.num_keys += 1 else: # bump node to the end of the list, update value _SCREAMING_SNAKE_CASE = self.list.remove(self.cache[key] ) assert node is not None # node guaranteed to be in list _SCREAMING_SNAKE_CASE = value 
self.list.add(UpperCAmelCase__ ) @classmethod def __magic_name__ ( cls , A_ = 128 )-> Optional[int]: def cache_decorator_inner(A_ ) -> Callable[..., U]: def cache_decorator_wrapper(*A_ ) -> U: if func not in cls.decorator_function_to_instance_map: _SCREAMING_SNAKE_CASE = LRUCache(UpperCAmelCase__ ) _SCREAMING_SNAKE_CASE = cls.decorator_function_to_instance_map[func].get(args[0] ) if result is None: _SCREAMING_SNAKE_CASE = func(*UpperCAmelCase__ ) cls.decorator_function_to_instance_map[func].put(args[0] , UpperCAmelCase__ ) return result def cache_info() -> LRUCache[T, U]: return cls.decorator_function_to_instance_map[func] setattr(UpperCAmelCase__ , 'cache_info' , UpperCAmelCase__ ) # noqa: B010 return cache_decorator_wrapper return cache_decorator_inner if __name__ == "__main__": import doctest doctest.testmod()
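For comparison, a minimal sketch of the same memoisation pattern using the standard-library functools.lru_cache; the class and method identifiers in the sample above appear garbled, so they are not invoked directly, and the fib example is purely illustrative.

from functools import lru_cache

@lru_cache(maxsize=128)
def fib(num: int) -> int:
    # same recursive shape the decorator classmethod above is meant to memoise
    return num if num < 2 else fib(num - 1) + fib(num - 2)

print(fib(30))           # 832040
print(fib.cache_info())  # e.g. CacheInfo(hits=28, misses=31, maxsize=128, currsize=31)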
605
'''simple docstring''' import math import flax.linen as nn import jax.numpy as jnp def _lowerCAmelCase ( __magic_name__ : jnp.ndarray , __magic_name__ : int , __magic_name__ : float = 1 , __magic_name__ : float = 1 , __magic_name__ : float = 1.0E4 , __magic_name__ : bool = False , __magic_name__ : float = 1.0 , ) -> jnp.ndarray: assert timesteps.ndim == 1, "Timesteps should be a 1d-array" assert embedding_dim % 2 == 0, f'''Embedding dimension {embedding_dim} should be even''' lowercase : int =float(embedding_dim // 2 ) lowercase : Optional[int] =math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift) lowercase : Any =min_timescale * jnp.exp(jnp.arange(__magic_name__ , dtype=jnp.floataa ) * -log_timescale_increment ) lowercase : List[Any] =jnp.expand_dims(__magic_name__ , 1 ) * jnp.expand_dims(__magic_name__ , 0 ) # scale embeddings lowercase : Tuple =scale * emb if flip_sin_to_cos: lowercase : Dict =jnp.concatenate([jnp.cos(__magic_name__ ), jnp.sin(__magic_name__ )] , axis=1 ) else: lowercase : Any =jnp.concatenate([jnp.sin(__magic_name__ ), jnp.cos(__magic_name__ )] , axis=1 ) lowercase : List[str] =jnp.reshape(__magic_name__ , [jnp.shape(__magic_name__ )[0], embedding_dim] ) return signal class __SCREAMING_SNAKE_CASE ( nn.Module ): lowerCamelCase_ = 32 lowerCamelCase_ = jnp.floataa @nn.compact def __call__( self : Tuple , UpperCAmelCase__ : int ): '''simple docstring''' lowercase : List[Any] =nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_1''' )(UpperCAmelCase__ ) lowercase : Any =nn.silu(UpperCAmelCase__ ) lowercase : int =nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_2''' )(UpperCAmelCase__ ) return temb class __SCREAMING_SNAKE_CASE ( nn.Module ): lowerCamelCase_ = 32 lowerCamelCase_ = False lowerCamelCase_ = 1 @nn.compact def __call__( self : int , UpperCAmelCase__ : str ): '''simple docstring''' return get_sinusoidal_embeddings( UpperCAmelCase__ , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
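A small usage sketch of the helper above, assuming the name get_sinusoidal_embeddings that the embedding module's __call__ uses (the definition's identifier is garbled in this sample); the dimension and timestep values are illustrative.

import jax.numpy as jnp

timesteps = jnp.array([0, 1, 10, 100])  # a dummy 1-d batch of timesteps
emb = get_sinusoidal_embeddings(timesteps, embedding_dim=32, flip_sin_to_cos=True)
print(emb.shape)  # (4, 32)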
92
0
"""simple docstring""" import gc import math import unittest import torch from diffusers import UNetaDModel from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin __UpperCAmelCase =logging.get_logger(__name__) enable_full_determinism() class lowerCAmelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ): lowercase__ : Tuple = UNetaDModel lowercase__ : Union[str, Any] = """sample""" @property def lowercase_ ( self ): '''simple docstring''' A__ = 4 A__ = 3 A__ = (32, 32) A__ = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase__ ) A__ = torch.tensor([10] ).to(UpperCAmelCase__ ) return {"sample": noise, "timestep": time_step} @property def lowercase_ ( self ): '''simple docstring''' return (3, 32, 32) @property def lowercase_ ( self ): '''simple docstring''' return (3, 32, 32) def lowercase_ ( self ): '''simple docstring''' A__ = { '''block_out_channels''': (32, 64), '''down_block_types''': ('''DownBlock2D''', '''AttnDownBlock2D'''), '''up_block_types''': ('''AttnUpBlock2D''', '''UpBlock2D'''), '''attention_head_dim''': 3, '''out_channels''': 3, '''in_channels''': 3, '''layers_per_block''': 2, '''sample_size''': 32, } A__ = self.dummy_input return init_dict, inputs_dict class lowerCAmelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ): lowercase__ : Union[str, Any] = UNetaDModel lowercase__ : Optional[int] = """sample""" @property def lowercase_ ( self ): '''simple docstring''' A__ = 4 A__ = 4 A__ = (32, 32) A__ = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase__ ) A__ = torch.tensor([10] ).to(UpperCAmelCase__ ) return {"sample": noise, "timestep": time_step} @property def lowercase_ ( self ): '''simple docstring''' return (4, 32, 32) @property def lowercase_ ( self ): '''simple docstring''' return (4, 32, 32) def lowercase_ ( self ): '''simple docstring''' A__ = { '''sample_size''': 32, '''in_channels''': 4, '''out_channels''': 4, '''layers_per_block''': 2, '''block_out_channels''': (32, 64), '''attention_head_dim''': 32, '''down_block_types''': ('''DownBlock2D''', '''DownBlock2D'''), '''up_block_types''': ('''UpBlock2D''', '''UpBlock2D'''), } A__ = self.dummy_input return init_dict, inputs_dict def lowercase_ ( self ): '''simple docstring''' A__ = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) self.assertEqual(len(loading_info["missing_keys"] ) , 0 ) model.to(UpperCAmelCase__ ) A__ = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != "cuda" , "This test is supposed to run on GPU" ) def lowercase_ ( self ): '''simple docstring''' A__ = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) A__ = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != "cuda" , "This test is supposed to run on GPU" ) def lowercase_ ( self ): '''simple docstring''' A__ = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=UpperCAmelCase__ ) model_accelerate.to(UpperCAmelCase__ ) model_accelerate.eval() A__ = torch.randn( 1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) 
, ) A__ = noise.to(UpperCAmelCase__ ) A__ = torch.tensor([10] * noise.shape[0] ).to(UpperCAmelCase__ ) A__ = model_accelerate(UpperCAmelCase__ , UpperCAmelCase__ )['''sample'''] # two models don't need to stay in the device at the same time del model_accelerate torch.cuda.empty_cache() gc.collect() A__ = UNetaDModel.from_pretrained( "fusing/unet-ldm-dummy-update" , output_loading_info=UpperCAmelCase__ , low_cpu_mem_usage=UpperCAmelCase__ ) model_normal_load.to(UpperCAmelCase__ ) model_normal_load.eval() A__ = model_normal_load(UpperCAmelCase__ , UpperCAmelCase__ )['''sample'''] assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , rtol=1e-3 ) def lowercase_ ( self ): '''simple docstring''' A__ = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" ) model.eval() model.to(UpperCAmelCase__ ) A__ = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) A__ = noise.to(UpperCAmelCase__ ) A__ = torch.tensor([10] * noise.shape[0] ).to(UpperCAmelCase__ ) with torch.no_grad(): A__ = model(UpperCAmelCase__ , UpperCAmelCase__ ).sample A__ = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off A__ = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800] ) # fmt: on self.assertTrue(torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , rtol=1e-3 ) ) class lowerCAmelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ): lowercase__ : Optional[int] = UNetaDModel lowercase__ : str = """sample""" @property def lowercase_ ( self , UpperCamelCase__=(32, 32) ): '''simple docstring''' A__ = 4 A__ = 3 A__ = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase__ ) A__ = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=UpperCAmelCase__ ) return {"sample": noise, "timestep": time_step} @property def lowercase_ ( self ): '''simple docstring''' return (3, 32, 32) @property def lowercase_ ( self ): '''simple docstring''' return (3, 32, 32) def lowercase_ ( self ): '''simple docstring''' A__ = { '''block_out_channels''': [32, 64, 64, 64], '''in_channels''': 3, '''layers_per_block''': 1, '''out_channels''': 3, '''time_embedding_type''': '''fourier''', '''norm_eps''': 1e-6, '''mid_block_scale_factor''': math.sqrt(2.0 ), '''norm_num_groups''': None, '''down_block_types''': [ '''SkipDownBlock2D''', '''AttnSkipDownBlock2D''', '''SkipDownBlock2D''', '''SkipDownBlock2D''', ], '''up_block_types''': [ '''SkipUpBlock2D''', '''SkipUpBlock2D''', '''AttnSkipUpBlock2D''', '''SkipUpBlock2D''', ], } A__ = self.dummy_input return init_dict, inputs_dict @slow def lowercase_ ( self ): '''simple docstring''' A__ = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256" , output_loading_info=UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) self.assertEqual(len(loading_info["missing_keys"] ) , 0 ) model.to(UpperCAmelCase__ ) A__ = self.dummy_input A__ = floats_tensor((4, 3) + (2_56, 2_56) ).to(UpperCAmelCase__ ) A__ = noise A__ = model(**UpperCAmelCase__ ) assert image is not None, "Make sure output is not None" @slow def lowercase_ ( self ): '''simple docstring''' A__ = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256" ) model.to(UpperCAmelCase__ ) A__ = 4 A__ = 3 A__ = (2_56, 2_56) A__ = torch.ones((batch_size, num_channels) + sizes ).to(UpperCAmelCase__ ) A__ = torch.tensor(batch_size * [1e-4] ).to(UpperCAmelCase__ ) with torch.no_grad(): A__ = model(UpperCAmelCase__ , UpperCAmelCase__ ).sample A__ = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off 
A__ = torch.tensor([-48_42.86_91, -64_99.66_31, -38_00.19_53, -79_78.26_86, -1_09_80.71_29, -2_00_28.85_35, 81_48.28_22, 23_42.29_05, 5_67.76_08] ) # fmt: on self.assertTrue(torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , rtol=1e-2 ) ) def lowercase_ ( self ): '''simple docstring''' A__ = UNetaDModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update" ) model.to(UpperCAmelCase__ ) A__ = 4 A__ = 3 A__ = (32, 32) A__ = torch.ones((batch_size, num_channels) + sizes ).to(UpperCAmelCase__ ) A__ = torch.tensor(batch_size * [1e-4] ).to(UpperCAmelCase__ ) with torch.no_grad(): A__ = model(UpperCAmelCase__ , UpperCAmelCase__ ).sample A__ = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off A__ = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256] ) # fmt: on self.assertTrue(torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , rtol=1e-2 ) ) def lowercase_ ( self ): '''simple docstring''' pass
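A standalone sketch of the forward-pass pattern these testers exercise, using the real diffusers class name UNet2DModel (the import spelling in this sample looks garbled) and the dummy config values from the first tester above.

import torch
from diffusers import UNet2DModel

model = UNet2DModel(
    block_out_channels=(32, 64),
    down_block_types=("DownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "UpBlock2D"),
    attention_head_dim=3,
    in_channels=3,
    out_channels=3,
    layers_per_block=2,
    sample_size=32,
)
noise = torch.randn(4, 3, 32, 32)  # matches the dummy_input shape above
timestep = torch.tensor([10])
with torch.no_grad():
    out = model(noise, timestep).sample
print(out.shape)  # torch.Size([4, 3, 32, 32])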
337
'''simple docstring''' from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) # TODO Update this UpperCamelCase_ = { """facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""", # See all ESM models at https://huggingface.co/models?filter=esm } class __SCREAMING_SNAKE_CASE ( lowercase__ ): lowerCamelCase_ = 'esm' def __init__( self : Optional[Any] , UpperCAmelCase__ : str=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : int=768 , UpperCAmelCase__ : Union[str, Any]=12 , UpperCAmelCase__ : List[Any]=12 , UpperCAmelCase__ : Optional[Any]=3072 , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : int=1026 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : Optional[int]=1E-12 , UpperCAmelCase__ : List[Any]="absolute" , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : str=False , UpperCAmelCase__ : int=False , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : List[str]=None , **UpperCAmelCase__ : int , ): '''simple docstring''' super().__init__(pad_token_id=UpperCAmelCase__ , mask_token_id=UpperCAmelCase__ , **UpperCAmelCase__ ) lowercase : Any =vocab_size lowercase : List[Any] =hidden_size lowercase : Any =num_hidden_layers lowercase : Optional[Any] =num_attention_heads lowercase : Tuple =intermediate_size lowercase : int =hidden_dropout_prob lowercase : Dict =attention_probs_dropout_prob lowercase : Optional[int] =max_position_embeddings lowercase : Union[str, Any] =initializer_range lowercase : Tuple =layer_norm_eps lowercase : Union[str, Any] =position_embedding_type lowercase : List[Any] =use_cache lowercase : Dict =emb_layer_norm_before lowercase : Optional[Any] =token_dropout lowercase : Union[str, Any] =is_folding_model if is_folding_model: if esmfold_config is None: logger.info('''No esmfold_config supplied for folding model, using default values.''' ) lowercase : Any =EsmFoldConfig() elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): lowercase : Optional[int] =EsmFoldConfig(**UpperCAmelCase__ ) lowercase : Union[str, Any] =esmfold_config if vocab_list is None: logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' ) lowercase : int =get_default_vocab_list() else: lowercase : Tuple =vocab_list else: lowercase : Union[str, Any] =None lowercase : Dict =None if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , UpperCAmelCase__ ): raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : Union[str, Any] =super().to_dict() if isinstance(self.esmfold_config , UpperCAmelCase__ ): lowercase : Optional[Any] =self.esmfold_config.to_dict() return output @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = None lowerCamelCase_ = True lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = 0 lowerCamelCase_ = True lowerCamelCase_ = False lowerCamelCase_ = 1_28 lowerCamelCase_ = None def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' if self.trunk is None: lowercase : str =TrunkConfig() elif isinstance(self.trunk , UpperCAmelCase__ ): lowercase : int =TrunkConfig(**self.trunk ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : str 
=asdict(self ) lowercase : Union[str, Any] =self.trunk.to_dict() return output @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = 48 lowerCamelCase_ = 10_24 lowerCamelCase_ = 1_28 lowerCamelCase_ = 32 lowerCamelCase_ = 32 lowerCamelCase_ = 32 lowerCamelCase_ = 0 lowerCamelCase_ = 0 lowerCamelCase_ = False lowerCamelCase_ = 4 lowerCamelCase_ = 1_28 lowerCamelCase_ = None def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' if self.structure_module is None: lowercase : Any =StructureModuleConfig() elif isinstance(self.structure_module , UpperCAmelCase__ ): lowercase : Union[str, Any] =StructureModuleConfig(**self.structure_module ) if self.max_recycles <= 0: raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' ) if self.sequence_state_dim % self.sequence_state_dim != 0: raise ValueError( '''`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got''' F''' {self.sequence_state_dim} and {self.sequence_state_dim}.''' ) if self.pairwise_state_dim % self.pairwise_state_dim != 0: raise ValueError( '''`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got''' F''' {self.pairwise_state_dim} and {self.pairwise_state_dim}.''' ) lowercase : str =self.sequence_state_dim // self.sequence_head_width lowercase : int =self.pairwise_state_dim // self.pairwise_head_width if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width: raise ValueError( '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got''' F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' ) if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width: raise ValueError( '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got''' F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' ) if self.pairwise_state_dim % 2 != 0: raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' ) if self.dropout >= 0.4: raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : List[Any] =asdict(self ) lowercase : Any =self.structure_module.to_dict() return output @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = 3_84 lowerCamelCase_ = 1_28 lowerCamelCase_ = 16 lowerCamelCase_ = 1_28 lowerCamelCase_ = 12 lowerCamelCase_ = 4 lowerCamelCase_ = 8 lowerCamelCase_ = 0.1 lowerCamelCase_ = 8 lowerCamelCase_ = 1 lowerCamelCase_ = 2 lowerCamelCase_ = 7 lowerCamelCase_ = 10 lowerCamelCase_ = 1E-8 lowerCamelCase_ = 1E5 def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return asdict(self ) def _lowerCAmelCase ( ) -> Optional[int]: return ( "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>", )
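A brief sketch of instantiating and serializing the configuration above through the standard transformers class name EsmConfig (the class identifiers in this sample are garbled); the sizes are illustrative, not taken from a released checkpoint.

from transformers import EsmConfig

config = EsmConfig(vocab_size=33, hidden_size=320, num_hidden_layers=6, num_attention_heads=20)
as_dict = config.to_dict()  # the to_dict override above also folds in esmfold_config when present
print(as_dict["model_type"], as_dict["hidden_size"])  # esm 320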
92
0
"""simple docstring""" import warnings from ...utils import logging from .image_processing_mobilevit import MobileViTImageProcessor __A = logging.get_logger(__name__) class snake_case ( lowercase__ ): def __init__( self : List[Any] , *UpperCamelCase__ : Any , **UpperCamelCase__ : int)-> Union[str, Any]: '''simple docstring''' warnings.warn( "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use MobileViTImageProcessor instead." , UpperCAmelCase__ , ) super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__)
346
'''simple docstring''' # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import doctest import sys import warnings from os.path import abspath, dirname, join import _pytest from transformers.testing_utils import HfDoctestModule, HfDocTestParser # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. UpperCamelCase_ = abspath(join(dirname(__file__), """src""")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action="""ignore""", category=FutureWarning) def _lowerCAmelCase ( __magic_name__ : int ) -> Tuple: config.addinivalue_line( '''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' ) config.addinivalue_line( '''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' ) config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' ) config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' ) config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' ) config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' ) def _lowerCAmelCase ( __magic_name__ : int ) -> Any: from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(__magic_name__ ) def _lowerCAmelCase ( __magic_name__ : Any ) -> Any: from transformers.testing_utils import pytest_terminal_summary_main lowercase : Optional[Any] =terminalreporter.config.getoption('''--make-reports''' ) if make_reports: pytest_terminal_summary_main(__magic_name__ , id=__magic_name__ ) def _lowerCAmelCase ( __magic_name__ : Tuple , __magic_name__ : Union[str, Any] ) -> List[str]: # If no tests are collected, pytest exists with code 5, which makes the CI fail. if exitstatus == 5: lowercase : Optional[int] =0 # Doctest custom flag to ignore output. UpperCamelCase_ = doctest.register_optionflag("""IGNORE_RESULT""") UpperCamelCase_ = doctest.OutputChecker class __SCREAMING_SNAKE_CASE ( lowercase__ ): def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] ): '''simple docstring''' if IGNORE_RESULT & optionflags: return True return OutputChecker.check_output(self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) UpperCamelCase_ = CustomOutputChecker UpperCamelCase_ = HfDoctestModule UpperCamelCase_ = HfDocTestParser
92
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) a : Dict = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Tuple = [ '''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''OPTForCausalLM''', '''OPTModel''', '''OPTPreTrainedModel''', '''OPTForSequenceClassification''', '''OPTForQuestionAnswering''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : str = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Union[str, Any] = [ '''FlaxOPTForCausalLM''', '''FlaxOPTModel''', '''FlaxOPTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_opt import ( OPT_PRETRAINED_MODEL_ARCHIVE_LIST, OPTForCausalLM, OPTForQuestionAnswering, OPTForSequenceClassification, OPTModel, OPTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel else: import sys a : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
639
'''simple docstring''' from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging UpperCamelCase_ = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE ( lowercase__ ): lowerCamelCase_ = ['pixel_values'] def __init__( self : List[Any] , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[int, float] = 1 / 255 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : int = 8 , **UpperCAmelCase__ : str , ): '''simple docstring''' super().__init__(**UpperCAmelCase__ ) lowercase : Union[str, Any] =do_rescale lowercase : List[Any] =rescale_factor lowercase : Tuple =do_pad lowercase : List[str] =pad_size def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : float , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Optional[int] ): '''simple docstring''' return rescale(UpperCAmelCase__ , scale=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ ) def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None ): '''simple docstring''' lowercase , lowercase : Union[str, Any] =get_image_size(UpperCAmelCase__ ) lowercase : Tuple =(old_height // size + 1) * size - old_height lowercase : Tuple =(old_width // size + 1) * size - old_width return pad(UpperCAmelCase__ , ((0, pad_height), (0, pad_width)) , mode='''symmetric''' , data_format=UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : ImageInput , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[float] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCAmelCase__ : Optional[Any] , ): '''simple docstring''' lowercase : int =do_rescale if do_rescale is not None else self.do_rescale lowercase : Tuple =rescale_factor if rescale_factor is not None else self.rescale_factor lowercase : int =do_pad if do_pad is not None else self.do_pad lowercase : List[Any] =pad_size if pad_size is not None else self.pad_size lowercase : Any =make_list_of_images(UpperCAmelCase__ ) if not valid_images(UpperCAmelCase__ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) # All transformations expect numpy arrays. lowercase : Dict =[to_numpy_array(UpperCAmelCase__ ) for image in images] if do_rescale: lowercase : Tuple =[self.rescale(image=UpperCAmelCase__ , scale=UpperCAmelCase__ ) for image in images] if do_pad: lowercase : Union[str, Any] =[self.pad(UpperCAmelCase__ , size=UpperCAmelCase__ ) for image in images] lowercase : Dict =[to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images] lowercase : Any ={'''pixel_values''': images} return BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ )
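A standalone sketch of the padding arithmetic used by pad() above: each spatial dimension is grown to the next multiple of size, and because of the + 1 an already-aligned dimension still gains one full extra block.

def pad_amounts(old_height: int, old_width: int, size: int = 8) -> tuple[int, int]:
    # mirrors the expressions in pad() above
    pad_height = (old_height // size + 1) * size - old_height
    pad_width = (old_width // size + 1) * size - old_width
    return pad_height, pad_width

print(pad_amounts(30, 45))  # (2, 3)
print(pad_amounts(32, 48))  # (8, 8): already-aligned sizes are still padded by a full block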
92
0
import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def __lowerCamelCase ( self ): SCREAMING_SNAKE_CASE_ : Optional[int] =tempfile.mkdtemp() # fmt: off SCREAMING_SNAKE_CASE_ : Union[str, Any] =['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest'''] # fmt: on SCREAMING_SNAKE_CASE_ : int =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) SCREAMING_SNAKE_CASE_ : Tuple ={ '''do_resize''': True, '''size''': {'''height''': 18, '''width''': 18}, '''do_normalize''': True, '''image_mean''': [0.5, 0.5, 0.5], '''image_std''': [0.5, 0.5, 0.5], } SCREAMING_SNAKE_CASE_ : Optional[Any] =os.path.join(self.tmpdirname , UpperCAmelCase__ ) with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp: json.dump(UpperCAmelCase__ , UpperCAmelCase__ ) def __lowerCamelCase ( self , **__UpperCAmelCase ): return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ) def __lowerCamelCase ( self , **__UpperCAmelCase ): return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ) def __lowerCamelCase ( self ): shutil.rmtree(self.tmpdirname ) def __lowerCamelCase ( self ): SCREAMING_SNAKE_CASE_ : Dict =[np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] SCREAMING_SNAKE_CASE_ : Optional[Any] =[Image.fromarray(np.moveaxis(UpperCAmelCase__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def __lowerCamelCase ( self ): SCREAMING_SNAKE_CASE_ : List[Any] =self.get_tokenizer() SCREAMING_SNAKE_CASE_ : Tuple =self.get_image_processor() SCREAMING_SNAKE_CASE_ : Any =VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ ) processor.save_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE_ : List[str] =VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCAmelCase__ ) def __lowerCamelCase ( self ): SCREAMING_SNAKE_CASE_ : str =VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE_ : List[str] =self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) SCREAMING_SNAKE_CASE_ : Optional[Any] =self.get_image_processor(do_normalize=UpperCAmelCase__ , padding_value=1.0 ) SCREAMING_SNAKE_CASE_ : Tuple =VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCAmelCase__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , 
tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCAmelCase__ ) def __lowerCamelCase ( self ): SCREAMING_SNAKE_CASE_ : List[str] =self.get_image_processor() SCREAMING_SNAKE_CASE_ : List[str] =self.get_tokenizer() SCREAMING_SNAKE_CASE_ : Optional[int] =VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Optional[int] =self.prepare_image_inputs() SCREAMING_SNAKE_CASE_ : Optional[Any] =image_processor(UpperCAmelCase__ , return_tensors='np' ) SCREAMING_SNAKE_CASE_ : List[str] =processor(images=UpperCAmelCase__ , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __lowerCamelCase ( self ): SCREAMING_SNAKE_CASE_ : Union[str, Any] =self.get_image_processor() SCREAMING_SNAKE_CASE_ : str =self.get_tokenizer() SCREAMING_SNAKE_CASE_ : List[str] =VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Optional[Any] ='''lower newer''' SCREAMING_SNAKE_CASE_ : List[str] =processor(text=UpperCAmelCase__ ) SCREAMING_SNAKE_CASE_ : List[Any] =tokenizer(UpperCAmelCase__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __lowerCamelCase ( self ): SCREAMING_SNAKE_CASE_ : List[Any] =self.get_image_processor() SCREAMING_SNAKE_CASE_ : List[str] =self.get_tokenizer() SCREAMING_SNAKE_CASE_ : Any =VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ ) SCREAMING_SNAKE_CASE_ : str ='''lower newer''' SCREAMING_SNAKE_CASE_ : Union[str, Any] =self.prepare_image_inputs() SCREAMING_SNAKE_CASE_ : Tuple =processor(text=UpperCAmelCase__ , images=UpperCAmelCase__ ) self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with self.assertRaises(UpperCAmelCase__ ): processor() def __lowerCamelCase ( self ): SCREAMING_SNAKE_CASE_ : Optional[Any] =self.get_image_processor() SCREAMING_SNAKE_CASE_ : Union[str, Any] =self.get_tokenizer() SCREAMING_SNAKE_CASE_ : Dict =VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Any =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] SCREAMING_SNAKE_CASE_ : Union[str, Any] =processor.batch_decode(UpperCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Optional[int] =tokenizer.batch_decode(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def __lowerCamelCase ( self ): SCREAMING_SNAKE_CASE_ : Union[str, Any] =self.get_image_processor() SCREAMING_SNAKE_CASE_ : Tuple =self.get_tokenizer() SCREAMING_SNAKE_CASE_ : Union[str, Any] =VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Union[str, Any] ='''lower newer''' SCREAMING_SNAKE_CASE_ : List[str] =self.prepare_image_inputs() SCREAMING_SNAKE_CASE_ : Any =processor(text=UpperCAmelCase__ , images=UpperCAmelCase__ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
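A hedged usage sketch of the processor these tests exercise; the hub checkpoint name is an illustrative placeholder rather than something taken from the tests.

import numpy as np
from PIL import Image
from transformers import BertTokenizer, ViTImageProcessor, VisionTextDualEncoderProcessor

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")  # placeholder checkpoint
image_processor = ViTImageProcessor()
processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

image = Image.fromarray(np.random.randint(0, 255, (30, 400, 3), dtype=np.uint8))
inputs = processor(text="lower newer", images=image, return_tensors="np")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values', 'token_type_ids']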
220
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase_ = {"""configuration_mbart""": ["""MBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MBartConfig""", """MBartOnnxConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ["""MBartTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ["""MBartTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ """MBART_PRETRAINED_MODEL_ARCHIVE_LIST""", """MBartForCausalLM""", """MBartForConditionalGeneration""", """MBartForQuestionAnswering""", """MBartForSequenceClassification""", """MBartModel""", """MBartPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ """TFMBartForConditionalGeneration""", """TFMBartModel""", """TFMBartPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ """FlaxMBartForConditionalGeneration""", """FlaxMBartForQuestionAnswering""", """FlaxMBartForSequenceClassification""", """FlaxMBartModel""", """FlaxMBartPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart import MBartTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart_fast import MBartTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mbart import ( MBART_PRETRAINED_MODEL_ARCHIVE_LIST, MBartForCausalLM, MBartForConditionalGeneration, MBartForQuestionAnswering, MBartForSequenceClassification, MBartModel, MBartPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mbart import ( FlaxMBartForConditionalGeneration, FlaxMBartForQuestionAnswering, FlaxMBartForSequenceClassification, FlaxMBartModel, FlaxMBartPreTrainedModel, ) else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
92
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _SCREAMING_SNAKE_CASE = { "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"], "tokenization_roc_bert": ["RoCBertTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: pass try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ "ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST", "RoCBertForCausalLM", "RoCBertForMaskedLM", "RoCBertForMultipleChoice", "RoCBertForPreTraining", "RoCBertForQuestionAnswering", "RoCBertForSequenceClassification", "RoCBertForTokenClassification", "RoCBertLayer", "RoCBertModel", "RoCBertPreTrainedModel", "load_tf_weights_in_roc_bert", ] if TYPE_CHECKING: from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig from .tokenization_roc_bert import RoCBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: raise OptionalDependencyNotAvailable() try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roc_bert import ( ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, RoCBertForCausalLM, RoCBertForMaskedLM, RoCBertForMultipleChoice, RoCBertForPreTraining, RoCBertForQuestionAnswering, RoCBertForSequenceClassification, RoCBertForTokenClassification, RoCBertLayer, RoCBertModel, RoCBertPreTrainedModel, load_tf_weights_in_roc_bert, ) else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
181
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import _LazyModule UpperCamelCase_ = {"""tokenization_byt5""": ["""ByT5Tokenizer"""]} if TYPE_CHECKING: from .tokenization_byta import ByTaTokenizer else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
92
0
"""simple docstring""" import numpy as np import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel from ...utils import logging __UpperCamelCase : Optional[int] = logging.get_logger(__name__) class a ( lowercase__ ): snake_case__ = CLIPConfig snake_case__ = ['''CLIPEncoderLayer'''] def __init__( self , _snake_case ): """simple docstring""" super().__init__(UpperCAmelCase__ ) lowerCAmelCase = CLIPVisionModelWithProjection(config.vision_config ) lowerCAmelCase = nn.Linear(config.vision_config.projection_dim , 1 ) lowerCAmelCase = nn.Linear(config.vision_config.projection_dim , 1 ) @torch.no_grad() def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case=0.5 , _snake_case=0.5 ): """simple docstring""" lowerCAmelCase = self.vision_model(UpperCAmelCase__ )[0] lowerCAmelCase = self.p_head(UpperCAmelCase__ ) lowerCAmelCase = nsfw_detected.flatten() lowerCAmelCase = nsfw_detected > p_threshold lowerCAmelCase = nsfw_detected.tolist() if any(UpperCAmelCase__ ): logger.warning( 'Potential NSFW content was detected in one or more images. A black image will be returned instead.' ' Try again with a different prompt and/or seed.' ) for idx, nsfw_detected_ in enumerate(UpperCAmelCase__ ): if nsfw_detected_: lowerCAmelCase = np.zeros(images[idx].shape ) lowerCAmelCase = self.w_head(UpperCAmelCase__ ) lowerCAmelCase = watermark_detected.flatten() lowerCAmelCase = watermark_detected > w_threshold lowerCAmelCase = watermark_detected.tolist() if any(UpperCAmelCase__ ): logger.warning( 'Potential watermarked content was detected in one or more images. A black image will be returned instead.' ' Try again with a different prompt and/or seed.' ) for idx, watermark_detected_ in enumerate(UpperCAmelCase__ ): if watermark_detected_: lowerCAmelCase = np.zeros(images[idx].shape ) return images, nsfw_detected, watermark_detected
4
'''simple docstring''' import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process UpperCamelCase_ = logging.getLogger(__name__) def _lowerCAmelCase ( __magic_name__ : List[Any] , __magic_name__ : str ) -> Union[str, Any]: return (preds == labels).mean() @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) lowerCamelCase_ = field( default=lowercase__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) lowerCamelCase_ = field( default=lowercase__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) lowerCamelCase_ = field( default=lowercase__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys() )} ) lowerCamelCase_ = field(metadata={'help': 'Should contain the data files for the task.'} ) lowerCamelCase_ = field( default=1_28 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) lowerCamelCase_ = field( default=lowercase__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) def _lowerCAmelCase ( ) -> int: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. lowercase : Dict =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) lowercase , lowercase , lowercase : List[Any] =parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. 
Use''' ''' --overwrite_output_dir to overcome.''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('''Training/evaluation parameters %s''' , __magic_name__ ) # Set seed set_seed(training_args.seed ) try: lowercase : Any =processors[data_args.task_name]() lowercase : Optional[int] =processor.get_labels() lowercase : str =len(__magic_name__ ) except KeyError: raise ValueError('''Task not found: %s''' % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. lowercase : List[str] =AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__magic_name__ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) lowercase : int =AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) lowercase : Any =AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__magic_name__ , cache_dir=model_args.cache_dir , ) # Get datasets lowercase : int =( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=__magic_name__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) lowercase : Union[str, Any] =( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=__magic_name__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(__magic_name__ : EvalPrediction ) -> Dict: lowercase : Dict =np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(__magic_name__ , p.label_ids )} # Data collator lowercase : List[str] =DataCollatorWithPadding(__magic_name__ , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer lowercase : Dict =Trainer( model=__magic_name__ , args=__magic_name__ , train_dataset=__magic_name__ , eval_dataset=__magic_name__ , compute_metrics=__magic_name__ , data_collator=__magic_name__ , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation lowercase : Optional[Any] ={} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) 
lowercase : List[Any] =trainer.evaluate() lowercase : Any =os.path.join(training_args.output_dir , '''eval_results.txt''' ) if trainer.is_world_master(): with open(__magic_name__ , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key, value in result.items(): logger.info(''' %s = %s''' , __magic_name__ , __magic_name__ ) writer.write('''%s = %s\n''' % (key, value) ) results.update(__magic_name__ ) return results def _lowerCAmelCase ( __magic_name__ : Any ) -> Optional[Any]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
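A hedged sketch of the argument-parsing step main() starts with; the two task-specific dataclasses are replaced by minimal stand-ins (their definitions in this sample are garbled), and the paths and flags are illustrative.

from dataclasses import dataclass, field
from transformers import HfArgumentParser, TrainingArguments

@dataclass
class ModelArguments:  # minimal stand-in for the first dataclass above
    model_name_or_path: str = field(metadata={"help": "model identifier or path"})

@dataclass
class DataTrainingArguments:  # minimal stand-in for the second dataclass above
    task_name: str = field(metadata={"help": "task to train on"})
    data_dir: str = field(metadata={"help": "directory with the data files"})
    max_seq_length: int = 128
    overwrite_cache: bool = False

parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses(args=[
    "--model_name_or_path", "bert-base-uncased",
    "--task_name", "swag",
    "--data_dir", "./swag",
    "--output_dir", "./mc_output",
    "--do_train",
])
print(model_args.model_name_or_path, data_args.task_name, training_args.do_train)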
92
0
import numpy as np class _lowerCAmelCase : """simple docstring""" def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : List[Any]=None ): """simple docstring""" self.set_matricies(red=UpperCAmelCase__ , green=UpperCAmelCase__ , blue=UpperCAmelCase__ , red_edge=UpperCAmelCase__ , nir=UpperCAmelCase__ ) def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : List[Any]=None ): """simple docstring""" if red is not None: UpperCamelCase = red if green is not None: UpperCamelCase = green if blue is not None: UpperCamelCase = blue if red_edge is not None: UpperCamelCase = red_edge if nir is not None: UpperCamelCase = nir return True def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : int="" , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None ): """simple docstring""" self.set_matricies(red=UpperCAmelCase__ , green=UpperCAmelCase__ , blue=UpperCAmelCase__ , red_edge=UpperCAmelCase__ , nir=UpperCAmelCase__ ) UpperCamelCase = { '''ARVI2''': self.arvaa, '''CCCI''': self.ccci, '''CVI''': self.cvi, '''GLI''': self.gli, '''NDVI''': self.ndvi, '''BNDVI''': self.bndvi, '''redEdgeNDVI''': self.red_edge_ndvi, '''GNDVI''': self.gndvi, '''GBNDVI''': self.gbndvi, '''GRNDVI''': self.grndvi, '''RBNDVI''': self.rbndvi, '''PNDVI''': self.pndvi, '''ATSAVI''': self.atsavi, '''BWDRVI''': self.bwdrvi, '''CIgreen''': self.ci_green, '''CIrededge''': self.ci_rededge, '''CI''': self.ci, '''CTVI''': self.ctvi, '''GDVI''': self.gdvi, '''EVI''': self.evi, '''GEMI''': self.gemi, '''GOSAVI''': self.gosavi, '''GSAVI''': self.gsavi, '''Hue''': self.hue, '''IVI''': self.ivi, '''IPVI''': self.ipvi, '''I''': self.i, '''RVI''': self.rvi, '''MRVI''': self.mrvi, '''MSAVI''': self.m_savi, '''NormG''': self.norm_g, '''NormNIR''': self.norm_nir, '''NormR''': self.norm_r, '''NGRDI''': self.ngrdi, '''RI''': self.ri, '''S''': self.s, '''IF''': self._if, '''DVI''': self.dvi, '''TVI''': self.tvi, '''NDRE''': self.ndre, } try: return funcs[index]() except KeyError: print('Index not in the list!' 
) return False def __lowerCAmelCase ( self : Any ): """simple docstring""" return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red))) def __lowerCAmelCase ( self : Tuple ): """simple docstring""" return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / ( (self.nir - self.red) / (self.nir + self.red) ) def __lowerCAmelCase ( self : Optional[Any] ): """simple docstring""" return self.nir * (self.red / (self.green**2)) def __lowerCAmelCase ( self : Dict ): """simple docstring""" return (2 * self.green - self.red - self.blue) / ( 2 * self.green + self.red + self.blue ) def __lowerCAmelCase ( self : int ): """simple docstring""" return (self.nir - self.red) / (self.nir + self.red) def __lowerCAmelCase ( self : str ): """simple docstring""" return (self.nir - self.blue) / (self.nir + self.blue) def __lowerCAmelCase ( self : Any ): """simple docstring""" return (self.redEdge - self.red) / (self.redEdge + self.red) def __lowerCAmelCase ( self : Optional[Any] ): """simple docstring""" return (self.nir - self.green) / (self.nir + self.green) def __lowerCAmelCase ( self : Union[str, Any] ): """simple docstring""" return (self.nir - (self.green + self.blue)) / ( self.nir + (self.green + self.blue) ) def __lowerCAmelCase ( self : Optional[Any] ): """simple docstring""" return (self.nir - (self.green + self.red)) / ( self.nir + (self.green + self.red) ) def __lowerCAmelCase ( self : Optional[int] ): """simple docstring""" return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red)) def __lowerCAmelCase ( self : Dict ): """simple docstring""" return (self.nir - (self.green + self.red + self.blue)) / ( self.nir + (self.green + self.red + self.blue) ) def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : List[Any]=0.08 , SCREAMING_SNAKE_CASE__ : Tuple=1.22 , SCREAMING_SNAKE_CASE__ : List[str]=0.03 ): """simple docstring""" return a * ( (self.nir - a * self.red - b) / (a * self.nir + self.red - a * b + x * (1 + a**2)) ) def __lowerCAmelCase ( self : List[str] ): """simple docstring""" return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue) def __lowerCAmelCase ( self : Dict ): """simple docstring""" return (self.nir / self.green) - 1 def __lowerCAmelCase ( self : List[str] ): """simple docstring""" return (self.nir / self.redEdge) - 1 def __lowerCAmelCase ( self : Dict ): """simple docstring""" return (self.red - self.blue) / self.red def __lowerCAmelCase ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase = self.ndvi() return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2)) def __lowerCAmelCase ( self : Union[str, Any] ): """simple docstring""" return self.nir - self.green def __lowerCAmelCase ( self : str ): """simple docstring""" return 2.5 * ( (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1) ) def __lowerCAmelCase ( self : str ): """simple docstring""" UpperCamelCase = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / ( self.nir + self.red + 0.5 ) return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red) def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : List[Any]=0.16 ): """simple docstring""" return (self.nir - self.green) / (self.nir + self.green + y) def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : int=0.5 ): """simple docstring""" return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n) def __lowerCAmelCase ( self : str ): """simple docstring""" return np.arctan( ((2 * self.red - self.green - self.blue) / 30.5) * 
(self.green - self.blue) ) def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : int=None ): """simple docstring""" return (self.nir - b) / (a * self.red) def __lowerCAmelCase ( self : int ): """simple docstring""" return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1) def __lowerCAmelCase ( self : List[Any] ): """simple docstring""" return (self.red + self.green + self.blue) / 30.5 def __lowerCAmelCase ( self : List[str] ): """simple docstring""" return self.nir / self.red def __lowerCAmelCase ( self : int ): """simple docstring""" return (self.rvi() - 1) / (self.rvi() + 1) def __lowerCAmelCase ( self : Union[str, Any] ): """simple docstring""" return ( (2 * self.nir + 1) - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2) ) / 2 def __lowerCAmelCase ( self : Optional[int] ): """simple docstring""" return self.green / (self.nir + self.red + self.green) def __lowerCAmelCase ( self : List[str] ): """simple docstring""" return self.nir / (self.nir + self.red + self.green) def __lowerCAmelCase ( self : Tuple ): """simple docstring""" return self.red / (self.nir + self.red + self.green) def __lowerCAmelCase ( self : Any ): """simple docstring""" return (self.green - self.red) / (self.green + self.red) def __lowerCAmelCase ( self : Any ): """simple docstring""" return (self.red - self.green) / (self.red + self.green) def __lowerCAmelCase ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] ) UpperCamelCase = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] ) return (max_value - min_value) / max_value def __lowerCAmelCase ( self : Dict ): """simple docstring""" return (2 * self.red - self.green - self.blue) / (self.green - self.blue) def __lowerCAmelCase ( self : List[str] ): """simple docstring""" return self.nir / self.red def __lowerCAmelCase ( self : int ): """simple docstring""" return (self.ndvi() + 0.5) ** (1 / 2) def __lowerCAmelCase ( self : Optional[int] ): """simple docstring""" return (self.nir - self.redEdge) / (self.nir + self.redEdge)
282
'''simple docstring''' import logging import os from dataclasses import dataclass, field from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import List, Optional import faiss import torch from datasets import Features, Sequence, Value, load_dataset from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser UpperCamelCase_ = logging.getLogger(__name__) torch.set_grad_enabled(False) UpperCamelCase_ = """cuda""" if torch.cuda.is_available() else """cpu""" def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : Tuple=100 , __magic_name__ : Optional[int]=" " ) -> List[str]: lowercase : List[Any] =text.split(__magic_name__ ) return [character.join(text[i : i + n] ).strip() for i in range(0 , len(__magic_name__ ) , __magic_name__ )] def _lowerCAmelCase ( __magic_name__ : dict ) -> dict: lowercase , lowercase : int =[], [] for title, text in zip(documents['''title'''] , documents['''text'''] ): if text is not None: for passage in split_text(__magic_name__ ): titles.append(title if title is not None else '''''' ) texts.append(__magic_name__ ) return {"title": titles, "text": texts} def _lowerCAmelCase ( __magic_name__ : dict , __magic_name__ : DPRContextEncoder , __magic_name__ : DPRContextEncoderTokenizerFast ) -> dict: lowercase : Dict =ctx_tokenizer( documents['''title'''] , documents['''text'''] , truncation=__magic_name__ , padding='''longest''' , return_tensors='''pt''' )['''input_ids'''] lowercase : Optional[int] =ctx_encoder(input_ids.to(device=__magic_name__ ) , return_dict=__magic_name__ ).pooler_output return {"embeddings": embeddings.detach().cpu().numpy()} def _lowerCAmelCase ( __magic_name__ : "RagExampleArguments" , __magic_name__ : "ProcessingArguments" , __magic_name__ : "IndexHnswArguments" , ) -> str: ###################################### logger.info('''Step 1 - Create the dataset''' ) ###################################### # The dataset needed for RAG must have three columns: # - title (string): title of the document # - text (string): text of a passage of the document # - embeddings (array of dimension d): DPR representation of the passage # Let's say you have documents in tab-separated csv files with columns "title" and "text" assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file" # You can load a Dataset object this way lowercase : Tuple =load_dataset( '''csv''' , data_files=[rag_example_args.csv_path] , split='''train''' , delimiter='''\t''' , column_names=['''title''', '''text'''] ) # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files # Then split the documents into passages of 100 words lowercase : Optional[int] =dataset.map(__magic_name__ , batched=__magic_name__ , num_proc=processing_args.num_proc ) # And compute the embeddings lowercase : Any =DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=__magic_name__ ) lowercase : Any =DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ) lowercase : Optional[int] =Features( {'''text''': Value('''string''' ), '''title''': Value('''string''' ), '''embeddings''': Sequence(Value('''float32''' ) )} ) # optional, save as float32 instead of float64 to save space lowercase : Optional[Any] =dataset.map( partial(__magic_name__ , ctx_encoder=__magic_name__ , ctx_tokenizer=__magic_name__ ) , batched=__magic_name__ , 
batch_size=processing_args.batch_size , features=__magic_name__ , ) # And finally save your dataset lowercase : Optional[Any] =os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset''' ) dataset.save_to_disk(__magic_name__ ) # from datasets import load_from_disk # dataset = load_from_disk(passages_path) # to reload the dataset ###################################### logger.info('''Step 2 - Index the dataset''' ) ###################################### # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search lowercase : Union[str, Any] =faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT ) dataset.add_faiss_index('''embeddings''' , custom_index=__magic_name__ ) # And save the index lowercase : Dict =os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset_hnsw_index.faiss''' ) dataset.get_index('''embeddings''' ).save(__magic_name__ ) # dataset.load_faiss_index("embeddings", index_path) # to reload the index @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = field( default=str(Path(lowercase__ ).parent / 'test_run' / 'dummy-kb' / 'my_knowledge_dataset.csv' ) , metadata={'help': 'Path to a tab-separated csv file with columns \'title\' and \'text\''} , ) lowerCamelCase_ = field( default=lowercase__ , metadata={'help': 'Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'} , ) lowerCamelCase_ = field( default='facebook/rag-sequence-nq' , metadata={'help': 'The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''} , ) lowerCamelCase_ = field( default='facebook/dpr-ctx_encoder-multiset-base' , metadata={ 'help': ( 'The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or' ' \'facebook/dpr-ctx_encoder-multiset-base\'' ) } , ) lowerCamelCase_ = field( default=str(Path(lowercase__ ).parent / 'test_run' / 'dummy-kb' ) , metadata={'help': 'Path to a directory where the dataset passages and the index will be saved'} , ) @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = field( default=lowercase__ , metadata={ 'help': 'The number of processes to use to split the documents into passages. Default is single process.' } , ) lowerCamelCase_ = field( default=16 , metadata={ 'help': 'The batch size to use when computing the passages embeddings using the DPR context encoder.' } , ) @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = field( default=7_68 , metadata={'help': 'The dimension of the embeddings to pass to the HNSW Faiss index.'} , ) lowerCamelCase_ = field( default=1_28 , metadata={ 'help': ( 'The number of bi-directional links created for every new element during the HNSW index construction.' ) } , ) if __name__ == "__main__": logging.basicConfig(level=logging.WARNING) logger.setLevel(logging.INFO) UpperCamelCase_ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments)) UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = parser.parse_args_into_dataclasses() with TemporaryDirectory() as tmp_dir: UpperCamelCase_ = rag_example_args.output_dir or tmp_dir main(rag_example_args, processing_args, index_hnsw_args)
92
0
"""simple docstring""" import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch __magic_name__ = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE__ : def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : str = None , SCREAMING_SNAKE_CASE_ : uuid.UUID = None , SCREAMING_SNAKE_CASE_ : str=None , SCREAMING_SNAKE_CASE_ : int=None ): if not conversation_id: lowerCamelCase__ = uuid.uuida() if past_user_inputs is None: lowerCamelCase__ = [] if generated_responses is None: lowerCamelCase__ = [] lowerCamelCase__ = conversation_id lowerCamelCase__ = past_user_inputs lowerCamelCase__ = generated_responses lowerCamelCase__ = text def __eq__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tuple ): if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def __UpperCAmelCase ( self : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : bool = False ): if self.new_user_input: if overwrite: logger.warning( f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """ f"""with: \"{text}\".""" ) lowerCamelCase__ = text else: logger.warning( f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """ f"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" ) else: lowerCamelCase__ = text def __UpperCAmelCase ( self : List[Any] ): if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) lowerCamelCase__ = None def __UpperCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : str ): self.generated_responses.append(UpperCAmelCase__ ) def __UpperCAmelCase ( self : str ): for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self : int ): lowerCamelCase__ = f"""Conversation id: {self.uuid} \n""" for is_user, text in self.iter_texts(): lowerCamelCase__ = '''user''' if is_user else '''bot''' output += f"""{name} >> {text} \n""" return output @add_end_docstrings( lowercase__ , r"\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n " , ) class SCREAMING_SNAKE_CASE__ ( lowercase__ ): def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : str ): super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ ) if self.tokenizer.pad_token_id is None: lowerCamelCase__ = self.tokenizer.eos_token def __UpperCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE_ : Tuple=None , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , SCREAMING_SNAKE_CASE_ : Any=None , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ): lowerCamelCase__ = {} lowerCamelCase__ = {} lowerCamelCase__ = {} if min_length_for_response is not None: lowerCamelCase__ = min_length_for_response if minimum_tokens is not None: lowerCamelCase__ = minimum_tokens if "max_length" in generate_kwargs: lowerCamelCase__ = 
generate_kwargs['''max_length'''] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: lowerCamelCase__ = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(UpperCAmelCase__ ) return preprocess_params, forward_params, postprocess_params def __call__( self : str , SCREAMING_SNAKE_CASE_ : Union[Conversation, List[Conversation]] , SCREAMING_SNAKE_CASE_ : Any=0 , **SCREAMING_SNAKE_CASE_ : Any ): lowerCamelCase__ = super().__call__(UpperCAmelCase__ , num_workers=UpperCAmelCase__ , **UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and len(UpperCAmelCase__ ) == 1: return outputs[0] return outputs def __UpperCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Conversation , SCREAMING_SNAKE_CASE_ : Any=32 ): if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" ) if conversation.new_user_input is None: raise ValueError( f"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """ """Add user inputs with the conversation\'s `add_user_input` method""" ) if hasattr(self.tokenizer , """_build_conversation_input_ids""" ): lowerCamelCase__ = self.tokenizer._build_conversation_input_ids(UpperCAmelCase__ ) else: # If the tokenizer cannot handle conversations, we default to only the old version lowerCamelCase__ = self._legacy_parse_and_tokenize(UpperCAmelCase__ ) if self.framework == "pt": lowerCamelCase__ = torch.LongTensor([input_ids] ) elif self.framework == "tf": lowerCamelCase__ = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def __UpperCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : int=10 , **SCREAMING_SNAKE_CASE_ : Tuple ): lowerCamelCase__ = generate_kwargs.get("""max_length""" , self.model.config.max_length ) lowerCamelCase__ = model_inputs['''input_ids'''].shape[1] if max_length - minimum_tokens < n: logger.warning(f"""Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})""" ) lowerCamelCase__ = max_length - minimum_tokens lowerCamelCase__ = model_inputs['''input_ids'''][:, -trim:] if "attention_mask" in model_inputs: lowerCamelCase__ = model_inputs['''attention_mask'''][:, -trim:] lowerCamelCase__ = model_inputs.pop("""conversation""" ) lowerCamelCase__ = max_length lowerCamelCase__ = self.model.generate(**UpperCAmelCase__ , **UpperCAmelCase__ ) if self.model.config.is_encoder_decoder: lowerCamelCase__ = 1 else: lowerCamelCase__ = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def __UpperCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[Any]=True ): lowerCamelCase__ = model_outputs['''output_ids'''] lowerCamelCase__ = self.tokenizer.decode( output_ids[0] , skip_special_tokens=UpperCAmelCase__ , clean_up_tokenization_spaces=UpperCAmelCase__ , ) lowerCamelCase__ = model_outputs['''conversation'''] conversation.mark_processed() conversation.append_response(UpperCAmelCase__ ) return conversation def __UpperCAmelCase ( self : Any , SCREAMING_SNAKE_CASE_ : Conversation ): lowerCamelCase__ = self.tokenizer.eos_token_id lowerCamelCase__ = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) + [eos_token_id] ) else: 
input_ids.extend(self.tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) ) if len(UpperCAmelCase__ ) > self.tokenizer.model_max_length: lowerCamelCase__ = input_ids[-self.tokenizer.model_max_length :] return input_ids
129
'''simple docstring''' import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import MaMaaaTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from transformers.utils import is_sentencepiece_available if is_sentencepiece_available(): from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin if is_sentencepiece_available(): UpperCamelCase_ = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right UpperCamelCase_ = 128022 UpperCamelCase_ = 128028 @require_sentencepiece class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ): lowerCamelCase_ = MaMaaaTokenizer lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = True def lowerCamelCase_ ( self : Dict ): '''simple docstring''' super().setUp() lowercase : Dict =['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>'''] lowercase : List[Any] =dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) ) lowercase : List[Any] =Path(self.tmpdirname ) save_json(UpperCAmelCase__ , save_dir / VOCAB_FILES_NAMES['''vocab_file'''] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(UpperCAmelCase__ , save_dir / VOCAB_FILES_NAMES['''spm_file'''] ) lowercase : Tuple =MaMaaaTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase_ ( self : Any , **UpperCAmelCase__ : int ): '''simple docstring''' return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Dict ): '''simple docstring''' return ( "This is a test", "This is a test", ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : Tuple ='''</s>''' lowercase : Union[str, Any] =0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' lowercase : List[Any] =self.get_tokenizer() lowercase : Optional[Any] =list(tokenizer.get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''</s>''' ) self.assertEqual(vocab_keys[1] , '''<unk>''' ) self.assertEqual(vocab_keys[-1] , '''<s>''' ) self.assertEqual(len(UpperCAmelCase__ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) ) @unittest.skip('''Skip this test while all models are still to be uploaded.''' ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' pass def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : Union[str, Any] =self.get_tokenizer() lowercase : str =tokenizer.tokenize('''This is a test''' ) self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [2, 3, 4, 5, 6] , ) lowercase : Optional[int] =tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] ) self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) lowercase : Tuple =tokenizer.convert_tokens_to_string(UpperCAmelCase__ ) self.assertEqual(UpperCAmelCase__ , '''This is a test''' ) @slow def lowerCamelCase_ ( self 
: List[str] ): '''simple docstring''' # fmt: off lowercase : int ={'''input_ids''': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase__ , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , ) @require_torch @require_sentencepiece @require_tokenizers class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): lowerCamelCase_ = 'facebook/m2m100_418M' lowerCamelCase_ = [ 'In my opinion, there are two levels of response from the French government.', 'NSA Affair Emphasizes Complete Lack of Debate on Intelligence', ] lowerCamelCase_ = [ 'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.', 'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement', ] # fmt: off lowerCamelCase_ = [EN_CODE, 5_93, 19_49, 11_57_81, 4, 7_15_86, 42_34, 6_06_33, 12_62_33, 4_32, 12_38_08, 1_55_92, 11_97, 11_71_32, 12_06_18, 5, 2] @classmethod def lowerCamelCase_ ( cls : Optional[Any] ): '''simple docstring''' lowercase : MaMaaaTokenizer =MaMaaaTokenizer.from_pretrained( cls.checkpoint_name , 
src_lang='''en''' , tgt_lang='''fr''' ) lowercase : Optional[int] =1 return cls def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' self.assertEqual(self.tokenizer.get_lang_id('''ar''' ) , 128006 ) self.assertEqual(self.tokenizer.get_lang_id('''en''' ) , 128022 ) self.assertEqual(self.tokenizer.get_lang_id('''ro''' ) , 128076 ) self.assertEqual(self.tokenizer.get_lang_id('''mr''' ) , 128063 ) def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : List[str] =self.tokenizer.get_vocab() self.assertEqual(len(UpperCAmelCase__ ) , self.tokenizer.vocab_size ) self.assertEqual(vocab['''<unk>'''] , 3 ) self.assertIn(self.tokenizer.get_lang_token('''en''' ) , UpperCAmelCase__ ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : List[Any] ='''en''' lowercase : Optional[Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , UpperCAmelCase__ ) def lowerCamelCase_ ( self : int ): '''simple docstring''' self.assertIn(UpperCAmelCase__ , self.tokenizer.all_special_ids ) # fmt: off lowercase : str =[FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2] # fmt: on lowercase : Optional[Any] =self.tokenizer.decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ ) lowercase : Optional[Any] =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCAmelCase__ ) self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertNotIn(self.tokenizer.eos_token , UpperCAmelCase__ ) def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : Any =tempfile.mkdtemp() lowercase : Tuple =self.tokenizer.lang_token_to_id self.tokenizer.save_pretrained(UpperCAmelCase__ ) lowercase : Union[str, Any] =MaMaaaTokenizer.from_pretrained(UpperCAmelCase__ ) self.assertDictEqual(new_tok.lang_token_to_id , UpperCAmelCase__ ) @require_torch def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' lowercase : List[str] ='''en''' lowercase : int ='''fr''' lowercase : Union[str, Any] =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase__ , return_tensors='''pt''' ) lowercase : str =shift_tokens_right( batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id ) for k in batch: lowercase : int =batch[k].tolist() # batch = {k: v.tolist() for k,v in batch.items()} # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 # batch.decoder_inputs_ids[0][0] == assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == FR_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2] == [2, FR_CODE] @require_torch def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : Optional[int] ='''mr''' self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) lowercase : Union[str, Any] ='''zh''' self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) @require_torch def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : int ='''mr''' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] ) self.assertListEqual(self.tokenizer.suffix_tokens , 
[self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) lowercase : Optional[Any] ='''zh''' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) @require_torch def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : Optional[Any] =self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''' ) self.assertEqual( nested_simplify(UpperCAmelCase__ ) , { # en_XX, A, test, EOS '''input_ids''': [[128022, 58, 4183, 2]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 128006, } , )
92
0
from ..utils import DummyObject, requires_backends class _lowerCAmelCase ( metaclass=lowercase__ ): _lowercase =['''torch''', '''transformers''', '''onnx'''] def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> List[Any]: requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]: requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> int: requires_backends(cls , ["torch", "transformers", "onnx"] ) class _lowerCAmelCase ( metaclass=lowercase__ ): _lowercase =['''torch''', '''transformers''', '''onnx'''] def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> str: requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Union[str, Any]: requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[Any]: requires_backends(cls , ["torch", "transformers", "onnx"] ) class _lowerCAmelCase ( metaclass=lowercase__ ): _lowercase =['''torch''', '''transformers''', '''onnx'''] def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> List[str]: requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[str]: requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[str]: requires_backends(cls , ["torch", "transformers", "onnx"] ) class _lowerCAmelCase ( metaclass=lowercase__ ): _lowercase =['''torch''', '''transformers''', '''onnx'''] def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Any: requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[Any]: requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> int: requires_backends(cls , ["torch", "transformers", "onnx"] ) class _lowerCAmelCase ( metaclass=lowercase__ ): _lowercase =['''torch''', '''transformers''', '''onnx'''] def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]: requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> str: requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> int: requires_backends(cls , ["torch", "transformers", "onnx"] ) class _lowerCAmelCase ( metaclass=lowercase__ ): _lowercase =['''torch''', '''transformers''', '''onnx'''] def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> str: requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Any: requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Union[str, Any]: requires_backends(cls , ["torch", "transformers", "onnx"] )
290
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
92
0
'''simple docstring''' from __future__ import annotations def a_ ( _UpperCAmelCase : str ,_UpperCAmelCase : list[str] | None = None ) -> list[list[str]]: __snake_case : int = word_bank or [] # create a table __snake_case : int = len(_UpperCAmelCase ) + 1 __snake_case : list[list[list[str]]] = [] for _ in range(_UpperCAmelCase ): table.append([] ) # seed value __snake_case : Union[str, Any] = [[]] # because empty string has empty combination # iterate through the indices for i in range(_UpperCAmelCase ): # condition if table[i] != []: for word in word_bank: # slice condition if target[i : i + len(_UpperCAmelCase )] == word: __snake_case : list[list[str]] = [ [word, *way] for way in table[i] ] # adds the word to every combination the current position holds # now,push that combination to the table[i+len(word)] table[i + len(_UpperCAmelCase )] += new_combinations # combinations are in reverse order so reverse for better output for combination in table[len(_UpperCAmelCase )]: combination.reverse() return table[len(_UpperCAmelCase )] if __name__ == "__main__": print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa'''])) print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t'''])) print( all_construct( '''hexagonosaurus''', ['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''], ) )
286
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json""" ), # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2 } class __SCREAMING_SNAKE_CASE ( lowercase__ ): lowerCamelCase_ = 'speech_to_text_2' lowerCamelCase_ = ['past_key_values'] lowerCamelCase_ = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self : int , UpperCAmelCase__ : Dict=10000 , UpperCAmelCase__ : Optional[int]=6 , UpperCAmelCase__ : str=2048 , UpperCAmelCase__ : Optional[Any]=4 , UpperCAmelCase__ : Union[str, Any]=0.0 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Optional[Any]="relu" , UpperCAmelCase__ : List[str]=256 , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : List[Any]=0.0 , UpperCAmelCase__ : Optional[Any]=0.0 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : int=2 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : str=1 , UpperCAmelCase__ : Optional[Any]=0 , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : List[Any]=1024 , **UpperCAmelCase__ : Dict , ): '''simple docstring''' lowercase : List[str] =vocab_size lowercase : Optional[int] =d_model lowercase : Optional[Any] =decoder_ffn_dim lowercase : Any =decoder_layers lowercase : Dict =decoder_attention_heads lowercase : List[Any] =dropout lowercase : List[Any] =attention_dropout lowercase : Any =activation_dropout lowercase : Optional[Any] =activation_function lowercase : Optional[int] =init_std lowercase : Dict =decoder_layerdrop lowercase : Optional[int] =use_cache lowercase : Optional[Any] =decoder_layers lowercase : List[str] =scale_embedding # scale factor will be sqrt(d_model) if True lowercase : str =max_target_positions super().__init__( pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , decoder_start_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , )
92
0
from __future__ import annotations import math def SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ): """simple docstring""" if depth < 0: raise ValueError('Depth cannot be less than 0' ) if len(UpperCAmelCase__ ) == 0: raise ValueError('Scores cannot be empty' ) if depth == height: return scores[node_index] if is_max: return max( minimax(depth + 1 ,node_index * 2 ,UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ) ,minimax(depth + 1 ,node_index * 2 + 1 ,UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ) ,) return min( minimax(depth + 1 ,node_index * 2 ,UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ) ,minimax(depth + 1 ,node_index * 2 + 1 ,UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ) ,) def SCREAMING_SNAKE_CASE ( ): """simple docstring""" _SCREAMING_SNAKE_CASE = [90, 23, 6, 33, 21, 65, 123, 3_4423] _SCREAMING_SNAKE_CASE = math.log(len(UpperCAmelCase__ ) ,2 ) print('Optimal value : ' ,end='' ) print(minimax(0 ,0 ,UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
605
'''simple docstring''' import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __SCREAMING_SNAKE_CASE : def __init__( self : str , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str]=13 , UpperCAmelCase__ : Any=32 , UpperCAmelCase__ : List[str]=3 , UpperCAmelCase__ : Any=4 , UpperCAmelCase__ : List[str]=[10, 20, 30, 40] , UpperCAmelCase__ : Any=[2, 2, 3, 2] , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Optional[Any]=37 , UpperCAmelCase__ : Union[str, Any]="gelu" , UpperCAmelCase__ : Optional[Any]=10 , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : Optional[int]=["stage2", "stage3", "stage4"] , UpperCAmelCase__ : Dict=[2, 3, 4] , UpperCAmelCase__ : Optional[int]=None , ): '''simple docstring''' lowercase : List[Any] =parent lowercase : Tuple =batch_size lowercase : List[str] =image_size lowercase : List[Any] =num_channels lowercase : Union[str, Any] =num_stages lowercase : int =hidden_sizes lowercase : Any =depths lowercase : Tuple =is_training lowercase : str =use_labels lowercase : List[Any] =intermediate_size lowercase : int =hidden_act lowercase : Union[str, Any] =num_labels lowercase : Optional[int] =initializer_range lowercase : int =out_features lowercase : List[str] =out_indices lowercase : str =scope def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : str =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase : Dict =None if self.use_labels: lowercase : List[Any] =ids_tensor([self.batch_size] , self.num_labels ) lowercase : Dict =self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self : Any ): '''simple docstring''' return ConvNextVaConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[Any] ): '''simple docstring''' lowercase : Dict =ConvNextVaModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : Optional[Any] =model(UpperCAmelCase__ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int ): 
'''simple docstring''' lowercase : Dict =ConvNextVaForImageClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : str =model(UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int ): '''simple docstring''' lowercase : Union[str, Any] =ConvNextVaBackbone(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : Optional[int] =model(UpperCAmelCase__ ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None lowercase : Optional[Any] =None lowercase : str =ConvNextVaBackbone(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : Optional[Any] =model(UpperCAmelCase__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : Any =self.prepare_config_and_inputs() lowercase , lowercase , lowercase : str =config_and_inputs lowercase : Any ={'''pixel_values''': pixel_values} return config, inputs_dict def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : str =self.prepare_config_and_inputs() lowercase , lowercase , lowercase : List[str] =config_and_inputs lowercase : Optional[Any] ={'''pixel_values''': pixel_values, '''labels''': labels} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ): lowerCamelCase_ = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) lowerCamelCase_ = ( {'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification} if is_torch_available() else {} ) lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' lowercase : Dict =ConvNextVaModelTester(self ) lowercase : str =ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37 ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase_ ( self : Any ): '''simple docstring''' return @unittest.skip(reason='''ConvNextV2 does not use inputs_embeds''' ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' pass 
@unittest.skip(reason='''ConvNextV2 does not support input and output embeddings''' ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' pass @unittest.skip(reason='''ConvNextV2 does not use feedforward chunking''' ) def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' pass def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' if not self.model_tester.is_training: return for model_class in self.all_model_classes: lowercase , lowercase : Union[str, Any] =self.model_tester.prepare_config_and_inputs_with_labels() lowercase : Optional[int] =True if model_class.__name__ in [ *get_values(UpperCAmelCase__ ), *get_values(UpperCAmelCase__ ), ]: continue lowercase : Dict =model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.train() lowercase : Optional[Any] =self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ ) lowercase : List[Any] =model(**UpperCAmelCase__ ).loss loss.backward() def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' if not self.model_tester.is_training: return for model_class in self.all_model_classes: lowercase , lowercase : Any =self.model_tester.prepare_config_and_inputs_with_labels() lowercase : List[Any] =False lowercase : Any =True if ( model_class.__name__ in [*get_values(UpperCAmelCase__ ), *get_values(UpperCAmelCase__ )] or not model_class.supports_gradient_checkpointing ): continue lowercase : Any =model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.gradient_checkpointing_enable() model.train() lowercase : Optional[Any] =self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ ) lowercase : int =model(**UpperCAmelCase__ ).loss loss.backward() def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowercase , lowercase : int =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase : Dict =model_class(UpperCAmelCase__ ) lowercase : Union[str, Any] =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase : int =[*signature.parameters.keys()] lowercase : Optional[Any] =['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCAmelCase__ ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' lowercase : Tuple =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : int ): '''simple docstring''' def check_hidden_states_output(UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple ): lowercase : int =model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() with torch.no_grad(): lowercase : Any =model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) ) lowercase : Dict =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowercase : List[Any] =self.model_tester.num_stages self.assertEqual(len(UpperCAmelCase__ ) , expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowercase , lowercase : List[str] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase : List[str] =True check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) # check that 
output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase : Tuple =True check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' lowercase : str =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ ) @slow def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase : List[Any] =ConvNextVaModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) def _lowerCAmelCase ( ) -> List[Any]: lowercase : Union[str, Any] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' return AutoImageProcessor.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ) if is_vision_available() else None @slow def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' lowercase : Tuple =ConvNextVaForImageClassification.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ).to(UpperCAmelCase__ ) lowercase : int =self.default_image_processor lowercase : List[str] =prepare_img() lowercase : List[Any] =preprocessor(images=UpperCAmelCase__ , return_tensors='''pt''' ).to(UpperCAmelCase__ ) # forward pass with torch.no_grad(): lowercase : Dict =model(**UpperCAmelCase__ ) # verify the logits lowercase : Optional[Any] =torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , UpperCAmelCase__ ) lowercase : Tuple =torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(UpperCAmelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1E-4 ) )
92
0
"""simple docstring""" import unittest import numpy as np def __a ( A , A , A , A = None , ) -> np.ndarray: '''simple docstring''' A__ = np.shape(A ) A__ = np.shape(A ) A__ = np.shape(A ) if shape_a[0] != shape_b[0]: A__ = ( '''Expected the same number of rows for A and B. ''' f"""Instead found A of size {shape_a} and B of size {shape_b}""" ) raise ValueError(A ) if shape_b[1] != shape_c[1]: A__ = ( '''Expected the same number of columns for B and C. ''' f"""Instead found B of size {shape_b} and C of size {shape_c}""" ) raise ValueError(A ) A__ = pseudo_inv if a_inv is None: try: A__ = np.linalg.inv(A ) except np.linalg.LinAlgError: raise ValueError( "Input matrix A is not invertible. Cannot compute Schur complement." ) return mat_c - mat_b.T @ a_inv @ mat_b class lowerCAmelCase__ ( unittest.TestCase ): def lowercase_ ( self ): '''simple docstring''' A__ = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) A__ = np.array([[0, 3], [3, 0], [2, 3]] ) A__ = np.array([[2, 1], [6, 3]] ) A__ = schur_complement(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) A__ = np.block([[a, b], [b.T, c]] ) A__ = np.linalg.det(UpperCAmelCase__ ) A__ = np.linalg.det(UpperCAmelCase__ ) A__ = np.linalg.det(UpperCAmelCase__ ) self.assertAlmostEqual(UpperCAmelCase__ , det_a * det_s ) def lowercase_ ( self ): '''simple docstring''' A__ = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) A__ = np.array([[0, 3], [3, 0], [2, 3]] ) A__ = np.array([[2, 1], [6, 3]] ) with self.assertRaises(UpperCAmelCase__ ): schur_complement(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) def lowercase_ ( self ): '''simple docstring''' A__ = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) A__ = np.array([[0, 3], [3, 0], [2, 3]] ) A__ = np.array([[2, 1, 3], [6, 3, 5]] ) with self.assertRaises(UpperCAmelCase__ ): schur_complement(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) if __name__ == "__main__": import doctest doctest.testmod() unittest.main()
337
'''simple docstring''' import re from flax.core.frozen_dict import freeze from flax.traverse_util import flatten_dict, unflatten_dict from jax.experimental import PartitionSpec as P # Sentinels UpperCamelCase_ = object() # For specifying empty leaf dict `{}` UpperCamelCase_ = object() def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : int ) -> Optional[int]: lowercase : Optional[Any] =tuple((re.compile(x + '''$''' ) for x in qs) ) for i in range(len(__magic_name__ ) - len(__magic_name__ ) + 1 ): lowercase : Union[str, Any] =[x.match(__magic_name__ ) for x, y in zip(__magic_name__ , ks[i:] )] if matches and all(__magic_name__ ): return True return False def _lowerCAmelCase ( __magic_name__ : Dict ) -> List[str]: def replace(__magic_name__ : Optional[int] , __magic_name__ : Optional[Any] ): for rule, replacement in rules: if _match(__magic_name__ , __magic_name__ ): return replacement return val return replace def _lowerCAmelCase ( ) -> int: return [ # embeddings (("transformer", "wpe", "embedding"), P('''mp''' , __magic_name__ )), (("transformer", "wte", "embedding"), P('''mp''' , __magic_name__ )), # atention (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__magic_name__ , '''mp''' )), (("attention", "out_proj", "kernel"), P('''mp''' , __magic_name__ )), (("attention", "out_proj", "bias"), None), # mlp (("mlp", "c_fc", "kernel"), P(__magic_name__ , '''mp''' )), (("mlp", "c_fc", "bias"), P('''mp''' )), (("mlp", "c_proj", "kernel"), P('''mp''' , __magic_name__ )), (("mlp", "c_proj", "bias"), None), # layer norms ((r"ln_\d+", "bias"), None), ((r"\d+", r"ln_\d+", "scale"), None), (("ln_f", "bias"), None), (("ln_f", "scale"), None), ] def _lowerCAmelCase ( __magic_name__ : str ) -> int: lowercase : int =_get_partition_rules() lowercase : Tuple =_replacement_rules(__magic_name__ ) lowercase : Any ={k: _unmatched for k in flatten_dict(__magic_name__ )} lowercase : Any ={k: replace(__magic_name__ , __magic_name__ ) for k, v in initd.items()} assert _unmatched not in result.values(), "Incomplete partition spec." return freeze(unflatten_dict(__magic_name__ ) )
92
0
"""simple docstring""" import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class snake_case ( unittest.TestCase ): def __init__( self : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Any=7 , UpperCamelCase__ : Union[str, Any]=3 , UpperCamelCase__ : Tuple=1_8 , UpperCamelCase__ : List[str]=3_0 , UpperCamelCase__ : Union[str, Any]=4_0_0 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Any=None , UpperCamelCase__ : Any=True , )-> Any: '''simple docstring''' __lowerCAmelCase: Union[str, Any] = size if size is not None else {'''height''': 1_8, '''width''': 1_8} __lowerCAmelCase: Tuple = parent __lowerCAmelCase: str = batch_size __lowerCAmelCase: List[str] = num_channels __lowerCAmelCase: int = image_size __lowerCAmelCase: int = min_resolution __lowerCAmelCase: Optional[Any] = max_resolution __lowerCAmelCase: Any = do_resize __lowerCAmelCase: int = size __lowerCAmelCase: int = do_normalize def lowercase_ ( self : int)-> Union[str, Any]: '''simple docstring''' return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.8866443634033203, 0.6618829369544983, 0.3891746401786804], [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296], ]), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } @require_torch @require_vision class snake_case ( lowercase__, unittest.TestCase ): SCREAMING_SNAKE_CASE_ : Any = ImageGPTImageProcessor if is_vision_available() else None def lowercase_ ( self : int)-> int: '''simple docstring''' __lowerCAmelCase: Union[str, Any] = ImageGPTImageProcessingTester(self) @property def lowercase_ ( self : List[Any])-> Tuple: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def lowercase_ ( self : int)-> Tuple: '''simple docstring''' __lowerCAmelCase: Optional[Any] = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(UpperCAmelCase__ , "clusters")) self.assertTrue(hasattr(UpperCAmelCase__ , "do_resize")) self.assertTrue(hasattr(UpperCAmelCase__ , "size")) self.assertTrue(hasattr(UpperCAmelCase__ , "do_normalize")) def lowercase_ ( self : Optional[Any])-> Tuple: '''simple docstring''' __lowerCAmelCase: Any = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {"height": 1_8, "width": 1_8}) __lowerCAmelCase: Any = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2) self.assertEqual(image_processor.size , {"height": 4_2, "width": 4_2}) def lowercase_ ( self : Optional[Any])-> Optional[int]: '''simple docstring''' __lowerCAmelCase: List[str] = self.image_processing_class(**self.image_processor_dict) __lowerCAmelCase: List[str] = json.loads(image_processor.to_json_string()) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(UpperCAmelCase__ , obj[key])) else: self.assertEqual(obj[key] , UpperCAmelCase__) def lowercase_ ( self : Tuple)-> Any: '''simple docstring''' __lowerCAmelCase: str = self.image_processing_class(**self.image_processor_dict) with tempfile.TemporaryDirectory() as 
tmpdirname: __lowerCAmelCase: Any = os.path.join(UpperCAmelCase__ , "image_processor.json") image_processor_first.to_json_file(UpperCAmelCase__) __lowerCAmelCase: Optional[Any] = self.image_processing_class.from_json_file(UpperCAmelCase__).to_dict() __lowerCAmelCase: Tuple = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(UpperCAmelCase__ , image_processor_second[key])) else: self.assertEqual(image_processor_first[key] , UpperCAmelCase__) def lowercase_ ( self : List[str])-> Optional[int]: '''simple docstring''' __lowerCAmelCase: Optional[int] = self.image_processing_class(**self.image_processor_dict) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(UpperCAmelCase__) __lowerCAmelCase: Tuple = self.image_processing_class.from_pretrained(UpperCAmelCase__).to_dict() __lowerCAmelCase: Tuple = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(UpperCAmelCase__ , image_processor_second[key])) else: self.assertEqual(image_processor_first[key] , UpperCAmelCase__) @unittest.skip("ImageGPT requires clusters at initialization") def lowercase_ ( self : Optional[Any])-> Dict: '''simple docstring''' pass def a__ ( ) -> Dict: __lowerCAmelCase: List[Any] = load_dataset("hf-internal-testing/fixtures_image_utils" , split="test" ) __lowerCAmelCase: Optional[int] = Image.open(dataset[4]["file"] ) __lowerCAmelCase: Optional[Any] = Image.open(dataset[5]["file"] ) __lowerCAmelCase: Optional[Any] = [imagea, imagea] return images @require_vision @require_torch class snake_case ( unittest.TestCase ): @slow def lowercase_ ( self : Optional[int])-> Dict: '''simple docstring''' __lowerCAmelCase: Union[str, Any] = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small") __lowerCAmelCase: int = prepare_images() # test non-batched __lowerCAmelCase: int = image_processing(images[0] , return_tensors="pt") self.assertIsInstance(encoding.input_ids , torch.LongTensor) self.assertEqual(encoding.input_ids.shape , (1, 1_0_2_4)) __lowerCAmelCase: Tuple = [3_0_6, 1_9_1, 1_9_1] self.assertEqual(encoding.input_ids[0, :3].tolist() , UpperCAmelCase__) # test batched __lowerCAmelCase: Optional[int] = image_processing(UpperCAmelCase__ , return_tensors="pt") self.assertIsInstance(encoding.input_ids , torch.LongTensor) self.assertEqual(encoding.input_ids.shape , (2, 1_0_2_4)) __lowerCAmelCase: str = [3_0_3, 1_3, 1_3] self.assertEqual(encoding.input_ids[1, -3:].tolist() , UpperCAmelCase__)
346
'''simple docstring'''
from collections import defaultdict


def dfs(start: int) -> int:
    """Return the size of the subtree rooted at `start`, recording nodes whose subtree size is even."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited = {}
    cuts = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
92
0
def is_isogram(string: str) -> bool:
    '''simple docstring'''
    if not all(x.isalpha() for x in string):
        raise ValueError('String must only contain alphabetic characters.')
    # compare lowercased characters so the check is case-insensitive
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))


if __name__ == "__main__":
    input_str = input('Enter a string ').strip()
    isogram = is_isogram(input_str)
    print(f'''{input_str} is {'an' if isogram else 'not an'} isogram.''')
639
'''simple docstring''' import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging UpperCamelCase_ = logging.get_logger(__name__) def _lowerCAmelCase ( __magic_name__ : List[str] ) -> Dict: lowercase : List[str] =R'''\w+[.]\d+''' lowercase : List[str] =re.findall(__magic_name__ , __magic_name__ ) for pat in pats: lowercase : Optional[int] =key.replace(__magic_name__ , '''_'''.join(pat.split('''.''' ) ) ) return key def _lowerCAmelCase ( __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : Dict ) -> str: lowercase : Dict =pt_tuple_key[:-1] + ('''scale''',) if ( any('''norm''' in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): lowercase : str =pt_tuple_key[:-1] + ('''scale''',) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: lowercase : str =pt_tuple_key[:-1] + ('''scale''',) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: lowercase : Dict =pt_tuple_key[:-1] + ('''embedding''',) return renamed_pt_tuple_key, pt_tensor # conv layer lowercase : Tuple =pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: lowercase : Tuple =pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer lowercase : str =pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight": lowercase : Optional[Any] =pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight lowercase : Dict =pt_tuple_key[:-1] + ('''weight''',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias lowercase : Union[str, Any] =pt_tuple_key[:-1] + ('''bias''',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any]=42 ) -> List[str]: # Step 1: Convert pytorch tensor to numpy lowercase : Optional[Any] ={k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params lowercase : str =flax_model.init_weights(PRNGKey(__magic_name__ ) ) lowercase : Dict =flatten_dict(__magic_name__ ) lowercase : Dict ={} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowercase : Dict =rename_key(__magic_name__ ) lowercase : Optional[int] =tuple(renamed_pt_key.split('''.''' ) ) # Correctly rename weight parameters lowercase , lowercase : Any =rename_key_and_reshape_tensor(__magic_name__ , __magic_name__ , __magic_name__ ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ''' f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # also add unexpected weight so that warning is thrown lowercase : Tuple =jnp.asarray(__magic_name__ ) return unflatten_dict(__magic_name__ )
92
0
import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast __SCREAMING_SNAKE_CASE = datasets.utils.logging.get_logger(__name__) @dataclass class lowerCAmelCase_ ( datasets.BuilderConfig ): '''simple docstring''' _lowercase = 10_000 _lowercase = None _lowercase = None class lowerCAmelCase_ ( datasets.ArrowBasedBuilder ): '''simple docstring''' _lowercase = ParquetConfig def __lowerCamelCase ( self ): return datasets.DatasetInfo(features=self.config.features ) def __lowerCamelCase ( self , __UpperCAmelCase ): if not self.config.data_files: raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) SCREAMING_SNAKE_CASE_ : List[str] =dl_manager.download_and_extract(self.config.data_files ) if isinstance(UpperCAmelCase__ , (str, list, tuple) ): SCREAMING_SNAKE_CASE_ : Dict =data_files if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): SCREAMING_SNAKE_CASE_ : Tuple =[files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive SCREAMING_SNAKE_CASE_ : Optional[int] =[dl_manager.iter_files(UpperCAmelCase__ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )] SCREAMING_SNAKE_CASE_ : int =[] for split_name, files in data_files.items(): if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): SCREAMING_SNAKE_CASE_ : List[Any] =[files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive SCREAMING_SNAKE_CASE_ : Optional[int] =[dl_manager.iter_files(UpperCAmelCase__ ) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(UpperCAmelCase__ ): with open(UpperCAmelCase__ , 'rb' ) as f: SCREAMING_SNAKE_CASE_ : Any =datasets.Features.from_arrow_schema(pq.read_schema(UpperCAmelCase__ ) ) break splits.append(datasets.SplitGenerator(name=UpperCAmelCase__ , gen_kwargs={'files': files} ) ) return splits def __lowerCamelCase ( self , __UpperCAmelCase ): if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example SCREAMING_SNAKE_CASE_ : Dict =table_cast(UpperCAmelCase__ , self.info.features.arrow_schema ) return pa_table def __lowerCamelCase ( self , __UpperCAmelCase ): SCREAMING_SNAKE_CASE_ : Optional[Any] =self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema ) != sorted(self.config.columns ): raise ValueError( F"""Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'""" ) for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase__ ) ): with open(UpperCAmelCase__ , 'rb' ) as f: SCREAMING_SNAKE_CASE_ : Dict =pq.ParquetFile(UpperCAmelCase__ ) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ): SCREAMING_SNAKE_CASE_ : int =pa.Table.from_batches([record_batch] ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield F"""{file_idx}_{batch_idx}""", 
self._cast_table(UpperCAmelCase__ ) except ValueError as e: logger.error(F"""Failed to read file \'{file}\' with error {type(UpperCAmelCase__ )}: {e}""" ) raise
220
'''simple docstring''' from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. UpperCamelCase_ = 200 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. UpperCamelCase_ = 50 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. UpperCamelCase_ = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 1000)) def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : str ) -> tuple[str, float]: lowercase : int =len([g for position, g in enumerate(__magic_name__ ) if g == main_target[position]] ) return (item, float(__magic_name__ )) def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : str ) -> tuple[str, str]: lowercase : Any =random.randint(0 , len(__magic_name__ ) - 1 ) lowercase : Tuple =parent_a[:random_slice] + parent_a[random_slice:] lowercase : List[str] =parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : list[str] ) -> str: lowercase : Union[str, Any] =list(__magic_name__ ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: lowercase : Dict =random.choice(__magic_name__ ) return "".join(__magic_name__ ) def _lowerCAmelCase ( __magic_name__ : tuple[str, float] , __magic_name__ : list[tuple[str, float]] , __magic_name__ : list[str] , ) -> list[str]: lowercase : Any =[] # Generate more children proportionally to the fitness score. lowercase : Dict =int(parent_a[1] * 100 ) + 1 lowercase : List[str] =10 if child_n >= 10 else child_n for _ in range(__magic_name__ ): lowercase : List[str] =population_score[random.randint(0 , __magic_name__ )][0] lowercase , lowercase : Dict =crossover(parent_a[0] , __magic_name__ ) # Append new string to the population list. pop.append(mutate(__magic_name__ , __magic_name__ ) ) pop.append(mutate(__magic_name__ , __magic_name__ ) ) return pop def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : list[str] , __magic_name__ : bool = True ) -> tuple[int, int, str]: # Verify if N_POPULATION is bigger than N_SELECTED if N_POPULATION < N_SELECTED: lowercase : List[str] =f'''{N_POPULATION} must be bigger than {N_SELECTED}''' raise ValueError(__magic_name__ ) # Verify that the target contains no genes besides the ones inside genes variable. lowercase : Optional[int] =sorted({c for c in target if c not in genes} ) if not_in_genes_list: lowercase : Dict =f'''{not_in_genes_list} is not in genes list, evolution cannot converge''' raise ValueError(__magic_name__ ) # Generate random starting population. lowercase : int =[] for _ in range(__magic_name__ ): population.append(''''''.join([random.choice(__magic_name__ ) for i in range(len(__magic_name__ ) )] ) ) # Just some logs to know what the algorithms is doing. lowercase , lowercase : Optional[int] =0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(__magic_name__ ) # Random population created. Now it's time to evaluate. 
# Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. lowercase : List[str] =[evaluate(__magic_name__ , __magic_name__ ) for item in population] # Check if there is a matching evolution. lowercase : int =sorted(__magic_name__ , key=lambda __magic_name__ : x[1] , reverse=__magic_name__ ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( f'''\nGeneration: {generation}''' f'''\nTotal Population:{total_population}''' f'''\nBest score: {population_score[0][1]}''' f'''\nBest string: {population_score[0][0]}''' ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. lowercase : Any =population[: int(N_POPULATION / 3 )] population.clear() population.extend(__magic_name__ ) # Normalize population score to be between 0 and 1. lowercase : Dict =[ (item, score / len(__magic_name__ )) for item, score in population_score ] # This is selection for i in range(__magic_name__ ): population.extend(select(population_score[int(__magic_name__ )] , __magic_name__ , __magic_name__ ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. if len(__magic_name__ ) > N_POPULATION: break if __name__ == "__main__": UpperCamelCase_ = ( """This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!""" ) UpperCamelCase_ = list( """ ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm""" """nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\""" ) UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = basic(target_str, genes_list) print( f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}''' )
92
0
import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import evaluate import numpy as np from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") _SCREAMING_SNAKE_CASE = logging.getLogger(__name__) @dataclass class SCREAMING_SNAKE_CASE_ : """simple docstring""" __lowerCAmelCase : Optional[Any] =field( default=1_2_8 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) __lowerCAmelCase : List[Any] =field( default=lowercase__ , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} ) __lowerCAmelCase : Any =field( default=lowercase__ , metadata={ '''help''': ( '''Whether to pad all samples to `max_seq_length`. ''' '''If False, will pad the samples dynamically when batching to the maximum length in the batch.''' ) } , ) __lowerCAmelCase : Union[str, Any] =field( default=lowercase__ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) __lowerCAmelCase : List[str] =field( default=lowercase__ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) __lowerCAmelCase : List[Any] =field( default=lowercase__ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of prediction examples to this ''' '''value if set.''' ) } , ) @dataclass class SCREAMING_SNAKE_CASE_ : """simple docstring""" __lowerCAmelCase : Optional[int] =field( default=lowercase__ , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) __lowerCAmelCase : Tuple =field( default=lowercase__ , metadata={'''help''': '''Evaluation language. 
Also train language if `train_language` is set to None.'''} ) __lowerCAmelCase : Tuple =field( default=lowercase__ , metadata={'''help''': '''Train language if it is different from the evaluation language.'''} ) __lowerCAmelCase : Tuple =field( default=lowercase__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) __lowerCAmelCase : Dict =field( default=lowercase__ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) __lowerCAmelCase : Optional[int] =field( default=lowercase__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) __lowerCAmelCase : Tuple =field( default=lowercase__ , metadata={'''help''': '''arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'''} , ) __lowerCAmelCase : Optional[int] =field( default=lowercase__ , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , ) __lowerCAmelCase : Optional[int] =field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) __lowerCAmelCase : Optional[int] =field( default=lowercase__ , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) __lowerCAmelCase : List[Any] =field( default=lowercase__ , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , ) def _snake_case () -> Optional[int]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _lowercase =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) _lowercase =parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('run_xnli' , _snake_case) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout)] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _lowercase =training_args.get_process_log_level() logger.setLevel(_snake_case) datasets.utils.logging.set_verbosity(_snake_case) transformers.utils.logging.set_verbosity(_snake_case) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + f'''distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fpaa}''') logger.info(f'''Training/evaluation parameters {training_args}''') # Detecting last checkpoint. 
_lowercase =None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: _lowercase =get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' 'Use --overwrite_output_dir to overcome.') elif last_checkpoint is not None: logger.info( f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.') # Set seed before initializing model. set_seed(training_args.seed) # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. # Downloading and loading xnli dataset from the hub. if training_args.do_train: if model_args.train_language is None: _lowercase =load_dataset( 'xnli' , model_args.language , split='train' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) else: _lowercase =load_dataset( 'xnli' , model_args.train_language , split='train' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) _lowercase =train_dataset.features['''label'''].names if training_args.do_eval: _lowercase =load_dataset( 'xnli' , model_args.language , split='validation' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) _lowercase =eval_dataset.features['''label'''].names if training_args.do_predict: _lowercase =load_dataset( 'xnli' , model_args.language , split='test' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) _lowercase =predict_dataset.features['''label'''].names # Labels _lowercase =len(_snake_case) # Load pretrained model and tokenizer # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_lowercase =AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_snake_case , idalabel={str(_snake_case): label for i, label in enumerate(_snake_case)} , labelaid={label: i for i, label in enumerate(_snake_case)} , finetuning_task='xnli' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) _lowercase =AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) _lowercase =AutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path) , config=_snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) # Preprocessing the datasets # Padding strategy if data_args.pad_to_max_length: _lowercase ='''max_length''' else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch _lowercase =False def preprocess_function(_snake_case : List[str]): # Tokenize the texts return tokenizer( examples['premise'] , examples['hypothesis'] , padding=_snake_case , max_length=data_args.max_seq_length , truncation=_snake_case , ) if training_args.do_train: if data_args.max_train_samples is not None: _lowercase =min(len(_snake_case) , data_args.max_train_samples) _lowercase =train_dataset.select(range(_snake_case)) with training_args.main_process_first(desc='train dataset map pre-processing'): _lowercase =train_dataset.map( _snake_case , batched=_snake_case , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on train dataset' , ) # Log a few random samples from the training set: for index in random.sample(range(len(_snake_case)) , 3): logger.info(f'''Sample {index} of the training set: {train_dataset[index]}.''') if training_args.do_eval: if data_args.max_eval_samples is not None: _lowercase =min(len(_snake_case) , data_args.max_eval_samples) _lowercase =eval_dataset.select(range(_snake_case)) with training_args.main_process_first(desc='validation dataset map pre-processing'): _lowercase =eval_dataset.map( _snake_case , batched=_snake_case , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on validation dataset' , ) if training_args.do_predict: if data_args.max_predict_samples is not None: _lowercase =min(len(_snake_case) , data_args.max_predict_samples) _lowercase =predict_dataset.select(range(_snake_case)) with training_args.main_process_first(desc='prediction dataset map pre-processing'): _lowercase =predict_dataset.map( _snake_case , batched=_snake_case , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on prediction dataset' , ) # Get the metric function _lowercase =evaluate.load('xnli') # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. 
def compute_metrics(_snake_case : EvalPrediction): _lowercase =p.predictions[0] if isinstance(p.predictions , _snake_case) else p.predictions _lowercase =np.argmax(_snake_case , axis=1) return metric.compute(predictions=_snake_case , references=p.label_ids) # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. if data_args.pad_to_max_length: _lowercase =default_data_collator elif training_args.fpaa: _lowercase =DataCollatorWithPadding(_snake_case , pad_to_multiple_of=8) else: _lowercase =None # Initialize our Trainer _lowercase =Trainer( model=_snake_case , args=_snake_case , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=_snake_case , tokenizer=_snake_case , data_collator=_snake_case , ) # Training if training_args.do_train: _lowercase =None if training_args.resume_from_checkpoint is not None: _lowercase =training_args.resume_from_checkpoint elif last_checkpoint is not None: _lowercase =last_checkpoint _lowercase =trainer.train(resume_from_checkpoint=_snake_case) _lowercase =train_result.metrics _lowercase =( data_args.max_train_samples if data_args.max_train_samples is not None else len(_snake_case) ) _lowercase =min(_snake_case , len(_snake_case)) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics('train' , _snake_case) trainer.save_metrics('train' , _snake_case) trainer.save_state() # Evaluation if training_args.do_eval: logger.info('*** Evaluate ***') _lowercase =trainer.evaluate(eval_dataset=_snake_case) _lowercase =data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_snake_case) _lowercase =min(_snake_case , len(_snake_case)) trainer.log_metrics('eval' , _snake_case) trainer.save_metrics('eval' , _snake_case) # Prediction if training_args.do_predict: logger.info('*** Predict ***') _lowercase =trainer.predict(_snake_case , metric_key_prefix='predict') _lowercase =( data_args.max_predict_samples if data_args.max_predict_samples is not None else len(_snake_case) ) _lowercase =min(_snake_case , len(_snake_case)) trainer.log_metrics('predict' , _snake_case) trainer.save_metrics('predict' , _snake_case) _lowercase =np.argmax(_snake_case , axis=1) _lowercase =os.path.join(training_args.output_dir , 'predictions.txt') if trainer.is_world_process_zero(): with open(_snake_case , 'w') as writer: writer.write('index\tprediction\n') for index, item in enumerate(_snake_case): _lowercase =label_list[item] writer.write(f'''{index}\t{item}\n''') if __name__ == "__main__": main()
181
'''simple docstring''' import datasets UpperCamelCase_ = """\ @InProceedings{conneau2018xnli, author = \"Conneau, Alexis and Rinott, Ruty and Lample, Guillaume and Williams, Adina and Bowman, Samuel R. and Schwenk, Holger and Stoyanov, Veselin\", title = \"XNLI: Evaluating Cross-lingual Sentence Representations\", booktitle = \"Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing\", year = \"2018\", publisher = \"Association for Computational Linguistics\", location = \"Brussels, Belgium\", } """ UpperCamelCase_ = """\ XNLI is a subset of a few thousand examples from MNLI which has been translated into a 14 different languages (some low-ish resource). As with MNLI, the goal is to predict textual entailment (does sentence A imply/contradict/neither sentence B) and is a classification task (given two sentences, predict one of three labels). """ UpperCamelCase_ = """ Computes XNLI score which is just simple accuracy. Args: predictions: Predicted labels. references: Ground truth labels. Returns: 'accuracy': accuracy Examples: >>> predictions = [0, 1] >>> references = [0, 1] >>> xnli_metric = datasets.load_metric(\"xnli\") >>> results = xnli_metric.compute(predictions=predictions, references=references) >>> print(results) {'accuracy': 1.0} """ def _lowerCAmelCase ( __magic_name__ : Dict , __magic_name__ : Union[str, Any] ) -> Union[str, Any]: return (preds == labels).mean() @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __SCREAMING_SNAKE_CASE ( datasets.Metric ): def lowerCamelCase_ ( self : str ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ), '''references''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ), } ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , ) def lowerCamelCase_ ( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] ): '''simple docstring''' return {"accuracy": simple_accuracy(UpperCAmelCase__ , UpperCAmelCase__ )}
92
0
"""simple docstring""" import os # noqa: this is just for tests import os as renamed_os # noqa: this is just for tests from os import path # noqa: this is just for tests from os import path as renamed_path # noqa: this is just for tests from os.path import join # noqa: this is just for tests from os.path import join as renamed_join # noqa: this is just for tests __UpperCamelCase : Tuple = open # noqa: we just need to have a builtin inside this module to test it properly
4
'''simple docstring''' from __future__ import annotations import random import unittest from transformers import TransfoXLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLModel, ) class __SCREAMING_SNAKE_CASE : def __init__( self : List[str] , UpperCAmelCase__ : Dict , ): '''simple docstring''' lowercase : Any =parent lowercase : Optional[int] =13 lowercase : Union[str, Any] =7 lowercase : str =30 lowercase : Optional[int] =self.seq_length + self.mem_len lowercase : Dict =15 lowercase : List[str] =True lowercase : Optional[int] =True lowercase : Tuple =99 lowercase : str =[10, 50, 80] lowercase : List[Any] =32 lowercase : Optional[int] =32 lowercase : int =4 lowercase : Any =8 lowercase : List[Any] =128 lowercase : List[str] =2 lowercase : Tuple =2 lowercase : int =None lowercase : Optional[int] =1 lowercase : int =0 lowercase : List[str] =3 lowercase : str =self.vocab_size - 1 lowercase : Tuple =0.01 def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : str =None if self.use_labels: lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : Union[str, Any] =TransfoXLConfig( vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , ) return (config, input_ids_a, input_ids_a, lm_labels) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' random.seed(self.seed ) tf.random.set_seed(self.seed ) def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] ): '''simple docstring''' lowercase : Tuple =TFTransfoXLModel(UpperCAmelCase__ ) lowercase , lowercase : Optional[Any] =model(UpperCAmelCase__ ).to_tuple() lowercase : List[str] ={'''input_ids''': input_ids_a, '''mems''': mems_a} lowercase , lowercase : Any =model(UpperCAmelCase__ ).to_tuple() self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def lowerCamelCase_ ( self : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int ): '''simple docstring''' lowercase : int =TFTransfoXLLMHeadModel(UpperCAmelCase__ ) lowercase , lowercase : Tuple =model(UpperCAmelCase__ 
).to_tuple() lowercase : Optional[Any] ={'''input_ids''': input_ids_a, '''labels''': lm_labels} lowercase , lowercase : Optional[int] =model(UpperCAmelCase__ ).to_tuple() lowercase , lowercase : List[str] =model([input_ids_a, mems_a] ).to_tuple() lowercase : int ={'''input_ids''': input_ids_a, '''mems''': mems_a, '''labels''': lm_labels} lowercase , lowercase : str =model(UpperCAmelCase__ ).to_tuple() self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def lowerCamelCase_ ( self : str , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] ): '''simple docstring''' lowercase : Optional[int] =TFTransfoXLForSequenceClassification(UpperCAmelCase__ ) lowercase : Union[str, Any] =model(UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' lowercase : List[Any] =self.prepare_config_and_inputs() ((lowercase) , (lowercase) , (lowercase) , (lowercase)) : Optional[Any] =config_and_inputs lowercase : Union[str, Any] ={'''input_ids''': input_ids_a} return config, inputs_dict @require_tf class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ): lowerCamelCase_ = ( (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () ) lowerCamelCase_ = () if is_tf_available() else () lowerCamelCase_ = ( { 'feature-extraction': TFTransfoXLModel, 'text-classification': TFTransfoXLForSequenceClassification, 'text-generation': TFTransfoXLLMHeadModel, 'zero-shot': TFTransfoXLForSequenceClassification, } if is_tf_available() else {} ) # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int ): '''simple docstring''' if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. 
return True return False def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : Any =TFTransfoXLModelTester(self ) lowercase : Optional[int] =ConfigTester(self , config_class=UpperCAmelCase__ , d_embed=37 ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' self.model_tester.set_seed() lowercase : List[str] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_model(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' self.model_tester.set_seed() lowercase : Any =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' lowercase , lowercase : Tuple =self.model_tester.prepare_config_and_inputs_for_common() lowercase : int =[TFTransfoXLForSequenceClassification] for model_class in self.all_model_classes: lowercase : str =model_class(UpperCAmelCase__ ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class in list_other_models_with_output_ebd: lowercase : Union[str, Any] =model.get_output_embeddings() assert isinstance(UpperCAmelCase__ , tf.keras.layers.Layer ) lowercase : Any =model.get_bias() assert name is None else: lowercase : Optional[int] =model.get_output_embeddings() assert x is None lowercase : Optional[int] =model.get_bias() assert name is None def lowerCamelCase_ ( self : Any ): '''simple docstring''' # TODO JP: Make TransfoXL XLA compliant pass @slow def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase : int =TFTransfoXLModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) @unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' ) def lowerCamelCase_ ( self : int ): '''simple docstring''' pass @require_tf class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @unittest.skip('''Skip test until #12651 is resolved.''' ) @slow def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowercase : Optional[Any] =TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' ) # fmt: off lowercase : Tuple =tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . 
# Rasputin has a vision and denounces one of the men as a horse thief . Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . <eod> </s> <eos> # fmt: off lowercase : Optional[int] =[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( # except for Alexei and Maria ) are discovered. The voice of young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. # 1883 Western Siberia, a young Grigori Rasputin is asked by his father # and a group of men to perform magic. Rasputin has a vision and # denounces one of the men as a horse thief. Although his father initially # slaps him for making such an accusation, Rasputin watches as the man # is chased outside and beaten. Twenty years later, Rasputin sees a vision # of the Virgin Mary, prompting him to become a priest. # Rasputin quickly becomes famous, with people, even a bishop, begging for # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar # Nicholas II and his family were discovered. The voice of <unk> young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos> lowercase : int =model.generate(UpperCAmelCase__ , max_length=200 , do_sample=UpperCAmelCase__ ) self.assertListEqual(output_ids[0].numpy().tolist() , UpperCAmelCase__ )
92
0
import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device _snake_case = False class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" pass @nightly @require_torch_gpu class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __lowerCAmelCase ( self : List[Any] ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self : Optional[Any] ): """simple docstring""" UpperCamelCase = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa ) pipe.to(UpperCAmelCase__ ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) UpperCamelCase = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' ) UpperCamelCase = torch.manual_seed(0 ) UpperCamelCase = pipe.dual_guided( prompt='first prompt' , image=UpperCAmelCase__ , text_to_image_strength=0.75 , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(UpperCAmelCase__ ) UpperCamelCase = VersatileDiffusionPipeline.from_pretrained(UpperCAmelCase__ , torch_dtype=torch.floataa ) pipe.to(UpperCAmelCase__ ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) UpperCamelCase = generator.manual_seed(0 ) UpperCamelCase = pipe.dual_guided( prompt='first prompt' , image=UpperCAmelCase__ , text_to_image_strength=0.75 , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass" def __lowerCAmelCase ( self : Optional[int] ): """simple docstring""" UpperCamelCase = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa ) pipe.to(UpperCAmelCase__ ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) UpperCamelCase = '''cyberpunk 2077''' UpperCamelCase = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' ) UpperCamelCase = torch.manual_seed(0 ) UpperCamelCase = pipe.dual_guided( prompt=UpperCAmelCase__ , image=UpperCAmelCase__ , text_to_image_strength=0.75 , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images UpperCamelCase = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCamelCase = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 UpperCamelCase = '''A painting of a squirrel eating a burger ''' UpperCamelCase = torch.manual_seed(0 ) UpperCamelCase = pipe.text_to_image( prompt=UpperCAmelCase__ , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images UpperCamelCase = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCamelCase = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 UpperCamelCase = pipe.image_variation(UpperCAmelCase__ , generator=UpperCAmelCase__ , output_type='numpy' ).images UpperCamelCase = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 
3) UpperCamelCase = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
282
'''simple docstring''' import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class __SCREAMING_SNAKE_CASE : def __init__( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str=13 , UpperCAmelCase__ : Tuple=7 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Optional[Any]=99 , UpperCAmelCase__ : Any=16 , UpperCAmelCase__ : Optional[Any]=36 , UpperCAmelCase__ : str=6 , UpperCAmelCase__ : Any=6 , UpperCAmelCase__ : Optional[int]=6 , UpperCAmelCase__ : Any=37 , UpperCAmelCase__ : List[str]="gelu" , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : List[str]=512 , UpperCAmelCase__ : List[str]=16 , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : Union[str, Any]=3 , UpperCAmelCase__ : List[Any]=4 , UpperCAmelCase__ : Tuple=None , ): '''simple docstring''' lowercase : str =parent lowercase : int =batch_size lowercase : Any =seq_length lowercase : int =is_training lowercase : str =use_input_mask lowercase : int =use_token_type_ids lowercase : Dict =use_labels lowercase : int =vocab_size lowercase : str =embedding_size lowercase : Union[str, Any] =hidden_size lowercase : Tuple =num_hidden_layers lowercase : Any =num_hidden_groups lowercase : Union[str, Any] =num_attention_heads lowercase : Any =intermediate_size lowercase : Tuple =hidden_act lowercase : Optional[int] =hidden_dropout_prob lowercase : Union[str, Any] =attention_probs_dropout_prob lowercase : List[Any] =max_position_embeddings lowercase : int =type_vocab_size lowercase : int =type_sequence_label_size lowercase : Any =initializer_range lowercase : List[Any] =num_labels lowercase : int =num_choices lowercase : Optional[int] =scope def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : int =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : Optional[int] =None if self.use_input_mask: lowercase : List[str] =random_attention_mask([self.batch_size, self.seq_length] ) lowercase : Dict =None if self.use_token_type_ids: lowercase : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase : Tuple =None lowercase : Any =None lowercase : Dict =None if self.use_labels: lowercase : int =ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase : str =ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase : Optional[Any] =ids_tensor([self.batch_size] , self.num_choices ) lowercase : Any =self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' return AlbertConfig( vocab_size=self.vocab_size , 
hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str ): '''simple docstring''' lowercase : int =AlbertModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : int =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) lowercase : Dict =model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) lowercase : int =model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple ): '''simple docstring''' lowercase : Tuple =AlbertForPreTraining(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : int =model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , sentence_order_label=UpperCAmelCase__ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int ): '''simple docstring''' lowercase : Tuple =AlbertForMaskedLM(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : str =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] ): '''simple docstring''' lowercase : List[str] =AlbertForQuestionAnswering(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : List[str] =model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : int , 
UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] ): '''simple docstring''' lowercase : Optional[Any] =self.num_labels lowercase : Any =AlbertForSequenceClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : Dict =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict ): '''simple docstring''' lowercase : List[Any] =self.num_labels lowercase : str =AlbertForTokenClassification(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : int =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] ): '''simple docstring''' lowercase : Optional[int] =self.num_choices lowercase : List[Any] =AlbertForMultipleChoice(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : List[Any] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : int =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : int =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : List[str] =model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : Union[str, Any] =self.prepare_config_and_inputs() ( ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ) : Dict =config_and_inputs lowercase : Optional[Any] ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ): lowerCamelCase_ = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) lowerCamelCase_ = ( { 'feature-extraction': AlbertModel, 'fill-mask': AlbertForMaskedLM, 'question-answering': AlbertForQuestionAnswering, 'text-classification': AlbertForSequenceClassification, 'token-classification': AlbertForTokenClassification, 'zero-shot': AlbertForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase_ = True def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int=False ): '''simple docstring''' lowercase : Optional[int] =super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ ) if return_labels: if model_class in get_values(UpperCAmelCase__ ): lowercase : Any =torch.zeros( 
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCAmelCase__ ) lowercase : Any =torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ ) return inputs_dict def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : Tuple =AlbertModelTester(self ) lowercase : Optional[Any] =ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37 ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowercase : Optional[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' lowercase : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' lowercase : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : str ): '''simple docstring''' lowercase : List[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : str =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' lowercase : int =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : Optional[int] =self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowercase : Tuple =type self.model_tester.create_and_check_model(*UpperCAmelCase__ ) @slow def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase : str =AlbertModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) @require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @slow def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowercase : int =AlbertModel.from_pretrained('''albert-base-v2''' ) lowercase : Optional[int] =torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) lowercase : Any =torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): lowercase : Any =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )[0] lowercase : int =torch.Size((1, 11, 768) ) self.assertEqual(output.shape , UpperCAmelCase__ ) lowercase : Union[str, Any] =torch.tensor( [[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCAmelCase__ , atol=1E-4 ) )
92
0
"""simple docstring""" from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_outputs import ( BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import logging from .configuration_regnet import RegNetConfig __magic_name__ = logging.get_logger(__name__) # General docstring __magic_name__ = """RegNetConfig""" # Base docstring __magic_name__ = """facebook/regnet-y-040""" __magic_name__ = [1, 10_88, 7, 7] # Image classification docstring __magic_name__ = """facebook/regnet-y-040""" __magic_name__ = """tabby, tabby cat""" __magic_name__ = [ """facebook/regnet-y-040""", # See all regnet models at https://huggingface.co/models?filter=regnet ] class SCREAMING_SNAKE_CASE__ ( nn.Module ): def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 3 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : Optional[str] = "relu" , ): super().__init__() lowerCamelCase__ = nn.Convad( UpperCAmelCase__ , UpperCAmelCase__ , kernel_size=UpperCAmelCase__ , stride=UpperCAmelCase__ , padding=kernel_size // 2 , groups=UpperCAmelCase__ , bias=UpperCAmelCase__ , ) lowerCamelCase__ = nn.BatchNormad(UpperCAmelCase__ ) lowerCamelCase__ = ACTaFN[activation] if activation is not None else nn.Identity() def __UpperCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Any ): lowerCamelCase__ = self.convolution(UpperCAmelCase__ ) lowerCamelCase__ = self.normalization(UpperCAmelCase__ ) lowerCamelCase__ = self.activation(UpperCAmelCase__ ) return hidden_state class SCREAMING_SNAKE_CASE__ ( nn.Module ): def __init__( self : str , SCREAMING_SNAKE_CASE_ : RegNetConfig ): super().__init__() lowerCamelCase__ = RegNetConvLayer( config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act ) lowerCamelCase__ = config.num_channels def __UpperCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[str] ): lowerCamelCase__ = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( """Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" ) lowerCamelCase__ = self.embedder(UpperCAmelCase__ ) return hidden_state class SCREAMING_SNAKE_CASE__ ( nn.Module ): def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 2 ): super().__init__() lowerCamelCase__ = nn.Convad(UpperCAmelCase__ , UpperCAmelCase__ , kernel_size=1 , stride=UpperCAmelCase__ , bias=UpperCAmelCase__ ) lowerCamelCase__ = nn.BatchNormad(UpperCAmelCase__ ) def __UpperCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE_ : Tensor ): lowerCamelCase__ = self.convolution(UpperCAmelCase__ ) lowerCamelCase__ = self.normalization(UpperCAmelCase__ ) return hidden_state class SCREAMING_SNAKE_CASE__ ( nn.Module ): def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ): super().__init__() lowerCamelCase__ = nn.AdaptiveAvgPoolad((1, 1) ) lowerCamelCase__ = nn.Sequential( nn.Convad(UpperCAmelCase__ , UpperCAmelCase__ , kernel_size=1 ) , nn.ReLU() , 
nn.Convad(UpperCAmelCase__ , UpperCAmelCase__ , kernel_size=1 ) , nn.Sigmoid() , ) def __UpperCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] ): lowerCamelCase__ = self.pooler(UpperCAmelCase__ ) lowerCamelCase__ = self.attention(UpperCAmelCase__ ) lowerCamelCase__ = hidden_state * attention return hidden_state class SCREAMING_SNAKE_CASE__ ( nn.Module ): def __init__( self : str , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 1 ): super().__init__() lowerCamelCase__ = in_channels != out_channels or stride != 1 lowerCamelCase__ = max(1 , out_channels // config.groups_width ) lowerCamelCase__ = ( RegNetShortCut(UpperCAmelCase__ , UpperCAmelCase__ , stride=UpperCAmelCase__ ) if should_apply_shortcut else nn.Identity() ) lowerCamelCase__ = nn.Sequential( RegNetConvLayer(UpperCAmelCase__ , UpperCAmelCase__ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(UpperCAmelCase__ , UpperCAmelCase__ , stride=UpperCAmelCase__ , groups=UpperCAmelCase__ , activation=config.hidden_act ) , RegNetConvLayer(UpperCAmelCase__ , UpperCAmelCase__ , kernel_size=1 , activation=UpperCAmelCase__ ) , ) lowerCamelCase__ = ACTaFN[config.hidden_act] def __UpperCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ): lowerCamelCase__ = hidden_state lowerCamelCase__ = self.layer(UpperCAmelCase__ ) lowerCamelCase__ = self.shortcut(UpperCAmelCase__ ) hidden_state += residual lowerCamelCase__ = self.activation(UpperCAmelCase__ ) return hidden_state class SCREAMING_SNAKE_CASE__ ( nn.Module ): def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 1 ): super().__init__() lowerCamelCase__ = in_channels != out_channels or stride != 1 lowerCamelCase__ = max(1 , out_channels // config.groups_width ) lowerCamelCase__ = ( RegNetShortCut(UpperCAmelCase__ , UpperCAmelCase__ , stride=UpperCAmelCase__ ) if should_apply_shortcut else nn.Identity() ) lowerCamelCase__ = nn.Sequential( RegNetConvLayer(UpperCAmelCase__ , UpperCAmelCase__ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(UpperCAmelCase__ , UpperCAmelCase__ , stride=UpperCAmelCase__ , groups=UpperCAmelCase__ , activation=config.hidden_act ) , RegNetSELayer(UpperCAmelCase__ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(UpperCAmelCase__ , UpperCAmelCase__ , kernel_size=1 , activation=UpperCAmelCase__ ) , ) lowerCamelCase__ = ACTaFN[config.hidden_act] def __UpperCAmelCase ( self : Any , SCREAMING_SNAKE_CASE_ : Tuple ): lowerCamelCase__ = hidden_state lowerCamelCase__ = self.layer(UpperCAmelCase__ ) lowerCamelCase__ = self.shortcut(UpperCAmelCase__ ) hidden_state += residual lowerCamelCase__ = self.activation(UpperCAmelCase__ ) return hidden_state class SCREAMING_SNAKE_CASE__ ( nn.Module ): def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 2 , ): super().__init__() lowerCamelCase__ = RegNetXLayer if config.layer_type == '''x''' else RegNetYLayer lowerCamelCase__ = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , stride=UpperCAmelCase__ , ) , *[layer(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) for _ in range(depth - 1 )] , ) def __UpperCAmelCase ( self : List[Any] , 
SCREAMING_SNAKE_CASE_ : Union[str, Any] ): lowerCamelCase__ = self.layers(UpperCAmelCase__ ) return hidden_state class SCREAMING_SNAKE_CASE__ ( nn.Module ): def __init__( self : int , SCREAMING_SNAKE_CASE_ : RegNetConfig ): super().__init__() lowerCamelCase__ = nn.ModuleList([] ) # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( RegNetStage( UpperCAmelCase__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) lowerCamelCase__ = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(UpperCAmelCase__ , config.depths[1:] ): self.stages.append(RegNetStage(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , depth=UpperCAmelCase__ ) ) def __UpperCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE_ : Tensor , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : bool = True ): lowerCamelCase__ = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: lowerCamelCase__ = hidden_states + (hidden_state,) lowerCamelCase__ = stage_module(UpperCAmelCase__ ) if output_hidden_states: lowerCamelCase__ = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=UpperCAmelCase__ , hidden_states=UpperCAmelCase__ ) class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case = RegNetConfig snake_case = "regnet" snake_case = "pixel_values" snake_case = True def __UpperCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tuple ): if isinstance(UpperCAmelCase__ , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode="""fan_out""" , nonlinearity="""relu""" ) elif isinstance(UpperCAmelCase__ , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def __UpperCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[int]=False ): if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): lowerCamelCase__ = value __magic_name__ = R""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ __magic_name__ = R""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top." 
, lowercase__ , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet class SCREAMING_SNAKE_CASE__ ( lowercase__ ): def __init__( self : Any , SCREAMING_SNAKE_CASE_ : List[Any] ): super().__init__(UpperCAmelCase__ ) lowerCamelCase__ = config lowerCamelCase__ = RegNetEmbeddings(UpperCAmelCase__ ) lowerCamelCase__ = RegNetEncoder(UpperCAmelCase__ ) lowerCamelCase__ = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(UpperCAmelCase__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCAmelCase__ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def __UpperCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE_ : Tensor , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None ): lowerCamelCase__ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowerCamelCase__ = return_dict if return_dict is not None else self.config.use_return_dict lowerCamelCase__ = self.embedder(UpperCAmelCase__ ) lowerCamelCase__ = self.encoder( UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , return_dict=UpperCAmelCase__ ) lowerCamelCase__ = encoder_outputs[0] lowerCamelCase__ = self.pooler(UpperCAmelCase__ ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=UpperCAmelCase__ , pooler_output=UpperCAmelCase__ , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " , lowercase__ , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet class SCREAMING_SNAKE_CASE__ ( lowercase__ ): def __init__( self : str , SCREAMING_SNAKE_CASE_ : Any ): super().__init__(UpperCAmelCase__ ) lowerCamelCase__ = config.num_labels lowerCamelCase__ = RegNetModel(UpperCAmelCase__ ) # classification head lowerCamelCase__ = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(UpperCAmelCase__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCAmelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def __UpperCAmelCase ( self : str , SCREAMING_SNAKE_CASE_ : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE_ : Optional[torch.LongTensor] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , ): lowerCamelCase__ = return_dict if return_dict is not None else self.config.use_return_dict lowerCamelCase__ = self.regnet(UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , return_dict=UpperCAmelCase__ ) lowerCamelCase__ = outputs.pooler_output if return_dict else outputs[1] lowerCamelCase__ = self.classifier(UpperCAmelCase__ ) lowerCamelCase__ = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: lowerCamelCase__ = '''regression''' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): lowerCamelCase__ = '''single_label_classification''' else: lowerCamelCase__ = '''multi_label_classification''' if self.config.problem_type == "regression": lowerCamelCase__ = MSELoss() if self.num_labels == 1: lowerCamelCase__ = loss_fct(logits.squeeze() , labels.squeeze() ) else: lowerCamelCase__ = loss_fct(UpperCAmelCase__ , UpperCAmelCase__ ) elif self.config.problem_type == "single_label_classification": lowerCamelCase__ = CrossEntropyLoss() lowerCamelCase__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": lowerCamelCase__ = BCEWithLogitsLoss() lowerCamelCase__ = loss_fct(UpperCAmelCase__ , UpperCAmelCase__ ) if not return_dict: lowerCamelCase__ = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=UpperCAmelCase__ , logits=UpperCAmelCase__ , hidden_states=outputs.hidden_states )
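# A minimal, self-contained sketch of the squeeze-and-excitation gating used by the SE layer in
# this file (adaptive average pool -> 1x1 conv -> ReLU -> 1x1 conv -> sigmoid -> channel-wise scale).
# Channel sizes below are illustrative only, not taken from any RegNet checkpoint.
import torch
from torch import nn


class SqueezeExciteSketch(nn.Module):
    def __init__(self, channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
        # pool to per-channel statistics, learn a gate in [0, 1], and rescale each channel
        attention = self.attention(self.pooler(hidden_state))
        return hidden_state * attention


if __name__ == "__main__":
    features = torch.randn(2, 64, 14, 14)
    print(SqueezeExciteSketch(64, 16)(features).shape)  # torch.Size([2, 64, 14, 14])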
129
'''simple docstring''' import numpy as np from cva import destroyAllWindows, imread, imshow, waitKey class __SCREAMING_SNAKE_CASE : def __init__( self : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int ): '''simple docstring''' if dst_width < 0 or dst_height < 0: raise ValueError('''Destination width/height should be > 0''' ) lowercase : Union[str, Any] =img lowercase : Union[str, Any] =img.shape[1] lowercase : str =img.shape[0] lowercase : Union[str, Any] =dst_width lowercase : str =dst_height lowercase : str =self.src_w / self.dst_w lowercase : Optional[Any] =self.src_h / self.dst_h lowercase : int =( np.ones((self.dst_h, self.dst_w, 3) , np.uinta ) * 255 ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' for i in range(self.dst_h ): for j in range(self.dst_w ): lowercase : List[Any] =self.img[self.get_y(UpperCAmelCase__ )][self.get_x(UpperCAmelCase__ )] def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : int ): '''simple docstring''' return int(self.ratio_x * x ) def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : int ): '''simple docstring''' return int(self.ratio_y * y ) if __name__ == "__main__": UpperCamelCase_ , UpperCamelCase_ = 800, 600 UpperCamelCase_ = imread("""image_data/lena.jpg""", 1) UpperCamelCase_ = NearestNeighbour(im, dst_w, dst_h) n.process() imshow( f'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output ) waitKey(0) destroyAllWindows()
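# A standalone, numpy-only sketch of the same nearest-neighbour resize idea as the class above:
# every destination pixel (i, j) copies the source pixel at (int(i * src_h / dst_h), int(j * src_w / dst_w)).
# The input array below is synthetic, so no image file or OpenCV install is assumed.
import numpy as np


def nearest_neighbour_resize(img: np.ndarray, dst_w: int, dst_h: int) -> np.ndarray:
    src_h, src_w = img.shape[:2]
    ratio_x, ratio_y = src_w / dst_w, src_h / dst_h
    out = np.empty((dst_h, dst_w) + img.shape[2:], dtype=img.dtype)
    for i in range(dst_h):
        for j in range(dst_w):
            out[i, j] = img[int(ratio_y * i), int(ratio_x * j)]
    return out


if __name__ == "__main__":
    tiny = np.arange(16, dtype=np.uint8).reshape(4, 4)
    print(nearest_neighbour_resize(tiny, dst_w=8, dst_h=8).shape)  # (8, 8)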
92
0
def lowerCamelCase__ ( number : int ):
    """simple docstring"""
    if not isinstance(number , int ):
        lowerCAmelCase_ = F"""Input value of [number={number}] must be an integer"""
        raise TypeError(lowerCAmelCase_ )
    if number < 1:
        lowerCAmelCase_ = F"""Input value of [number={number}] must be > 0"""
        raise ValueError(lowerCAmelCase_ )
    # iterative Catalan recurrence: C(i) = C(i - 1) * (4 * i - 2) // (i + 1)
    current_number = 1
    for i in range(1 , number ):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
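# A small usage sketch for the function above: for inputs 1..6 it returns the Catalan numbers
# C_0..C_5, i.e. 1, 1, 2, 5, 14, 42, which are easy to verify by hand.
if __name__ == "__main__":
    assert [lowerCamelCase__(n) for n in range(1, 7)] == [1, 1, 2, 5, 14, 42]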
290
'''simple docstring'''
from __future__ import annotations


def resistor_parallel(resistors: list[float]) -> float:
    # 1 / R_total = 1 / R_1 + 1 / R_2 + ...
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f'''Resistor at index {index} has a negative or zero value!'''
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    # R_total = R_1 + R_2 + ...
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f'''Resistor at index {index} has a negative value!'''
            raise ValueError(msg)
        index += 1
    return sum_r


if __name__ == "__main__":
    import doctest

    doctest.testmod()
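# A quick usage sketch for the two helpers above, with numbers that are easy to check by hand:
# two 10-ohm resistors give 5 ohms in parallel and 20 ohms in series.
if __name__ == "__main__":
    print(resistor_parallel([10.0, 10.0]))  # 5.0
    print(resistor_series([10.0, 10.0]))  # 20.0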
92
0
'''simple docstring''' from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot_small import BlenderbotSmallTokenizer A__ : Optional[int] = logging.get_logger(__name__) A__ : Any = { '''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_config_file''': '''tokenizer_config.json''', } A__ : Tuple = { '''vocab_file''': { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json''' }, '''merges_file''': { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt''' }, '''tokenizer_config_file''': { '''facebook/blenderbot_small-90M''': ( '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json''' ) }, } A__ : Union[str, Any] = { '''facebook/blenderbot_small-90M''': 5_1_2, } class snake_case__ ( lowercase__ ): A__ = VOCAB_FILES_NAMES A__ = PRETRAINED_VOCAB_FILES_MAP A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ = BlenderbotSmallTokenizer def __init__( self : Tuple , __a : Union[str, Any]=None , __a : Optional[int]=None , __a : Optional[Any]="<|endoftext|>" , __a : Dict="<|endoftext|>" , __a : List[str]="<|endoftext|>" , __a : List[Any]=False , __a : Optional[Any]=True , **__a : Optional[int] , ) -> Optional[int]: '''simple docstring''' super().__init__( ByteLevelBPETokenizer( vocab=UpperCAmelCase__ , merges=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ , ) , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , **UpperCAmelCase__ , ) __snake_case : int = add_prefix_space def A_ ( self : Any , __a : str , __a : Tuple=None ) -> Union[str, Any]: '''simple docstring''' __snake_case : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def A_ ( self : Union[str, Any] , __a : List[int] , __a : Optional[List[int]] = None ) -> Optional[int]: '''simple docstring''' __snake_case : Optional[Any] = [self.sep_token_id] __snake_case : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
286
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""", """self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""", """self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""", """self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""", """self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""", """self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""", """self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""", """self_attn.rotary_emb""": """encoder.embed_positions""", """self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""", """conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""", """conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""", """conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""", """conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""", """conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""", """ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""", """ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""", """ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""", """ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""", """ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""", """ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } UpperCamelCase_ = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", ] def _lowerCAmelCase ( __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple ) -> str: for attribute in key.split('''.''' ): lowercase : Tuple =getattr(__magic_name__ , __magic_name__ ) if weight_type is not None: lowercase : Optional[int] =getattr(__magic_name__ , __magic_name__ ).shape else: lowercase : List[Any] =hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'''Shape of hf {key + "." 
+ weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": lowercase : Any =value elif weight_type == "weight_g": lowercase : List[Any] =value elif weight_type == "weight_v": lowercase : Union[str, Any] =value elif weight_type == "bias": lowercase : Tuple =value elif weight_type == "running_mean": lowercase : Union[str, Any] =value elif weight_type == "running_var": lowercase : str =value elif weight_type == "num_batches_tracked": lowercase : Tuple =value elif weight_type == "inv_freq": lowercase : Optional[Any] =value else: lowercase : Tuple =value logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def _lowerCAmelCase ( __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : str ) -> Union[str, Any]: lowercase : Optional[int] =[] lowercase : Tuple =fairseq_model.state_dict() lowercase : List[Any] =hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): lowercase : Tuple =False if "conv_layers" in name: load_conv_layer( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , hf_model.config.feat_extract_norm == '''group''' , ) lowercase : List[Any] =True else: for key, mapped_key in MAPPING.items(): lowercase : Optional[int] ='''wav2vec2_conformer.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: lowercase : Union[str, Any] =True if "*" in mapped_key: lowercase : Optional[int] =name.split(__magic_name__ )[0].split('''.''' )[-2] lowercase : List[str] =mapped_key.replace('''*''' , __magic_name__ ) if "pos_bias_u" in name: lowercase : Optional[Any] =None elif "pos_bias_v" in name: lowercase : Union[str, Any] =None elif "weight_g" in name: lowercase : Any ='''weight_g''' elif "weight_v" in name: lowercase : Tuple ='''weight_v''' elif "bias" in name: lowercase : Optional[int] ='''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj lowercase : Optional[int] ='''weight''' elif "running_mean" in name: lowercase : Union[str, Any] ='''running_mean''' elif "inv_freq" in name: lowercase : Any ='''inv_freq''' elif "running_var" in name: lowercase : Tuple ='''running_var''' elif "num_batches_tracked" in name: lowercase : Dict ='''num_batches_tracked''' else: lowercase : str =None set_recursively(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) continue if not is_used: unused_weights.append(__magic_name__ ) logger.warning(f'''Unused weights: {unused_weights}''' ) def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Tuple ) -> int: lowercase : Optional[Any] =full_name.split('''conv_layers.''' )[-1] lowercase : Any =name.split('''.''' ) lowercase : List[str] =int(items[0] ) lowercase : Union[str, Any] =int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) lowercase : Union[str, Any] =value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( 
f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) lowercase : Optional[Any] =value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) lowercase : Optional[int] =value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) lowercase : str =value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(__magic_name__ ) @torch.no_grad() def _lowerCAmelCase ( __magic_name__ : List[str] , __magic_name__ : Optional[Any] , __magic_name__ : Tuple=None , __magic_name__ : Optional[Any]=None , __magic_name__ : Union[str, Any]=True ) -> Union[str, Any]: if config_path is not None: lowercase : Optional[Any] =WavaVecaConformerConfig.from_pretrained(__magic_name__ , hidden_act='''swish''' ) else: lowercase : Optional[int] =WavaVecaConformerConfig() if "rope" in checkpoint_path: lowercase : Dict ='''rotary''' if is_finetuned: if dict_path: lowercase : Optional[Any] =Dictionary.load(__magic_name__ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq lowercase : str =target_dict.pad_index lowercase : Union[str, Any] =target_dict.bos_index lowercase : Any =target_dict.eos_index lowercase : Tuple =len(target_dict.symbols ) lowercase : str =os.path.join(__magic_name__ , '''vocab.json''' ) if not os.path.isdir(__magic_name__ ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__magic_name__ ) ) return os.makedirs(__magic_name__ , exist_ok=__magic_name__ ) lowercase : Dict =target_dict.indices # fairseq has the <pad> and <s> switched lowercase : str =0 lowercase : List[Any] =1 with open(__magic_name__ , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(__magic_name__ , __magic_name__ ) lowercase : List[str] =WavaVecaCTCTokenizer( __magic_name__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__magic_name__ , ) lowercase : Optional[int] =True if config.feat_extract_norm == '''layer''' else False lowercase : str =WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__magic_name__ , return_attention_mask=__magic_name__ , ) lowercase : Tuple =WavaVecaProcessor(feature_extractor=__magic_name__ , tokenizer=__magic_name__ ) processor.save_pretrained(__magic_name__ ) lowercase : str =WavaVecaConformerForCTC(__magic_name__ ) else: lowercase : Tuple =WavaVecaConformerForPreTraining(__magic_name__ ) if is_finetuned: lowercase , lowercase , lowercase : Optional[int] =fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} 
) else: lowercase : Dict =argparse.Namespace(task='''audio_pretraining''' ) lowercase : Optional[int] =fairseq.tasks.setup_task(__magic_name__ ) lowercase , lowercase , lowercase : str =fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__magic_name__ ) lowercase : List[Any] =model[0].eval() recursively_load_weights(__magic_name__ , __magic_name__ , not is_finetuned ) hf_wavavec.save_pretrained(__magic_name__ ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) UpperCamelCase_ = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
92
0
from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case : List[Any] = logging.get_logger(__name__) snake_case : str = { 'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json', } class __lowercase ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = "switch_transformers" SCREAMING_SNAKE_CASE : int = ["past_key_values"] SCREAMING_SNAKE_CASE : Any = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"} def __init__( self , A_=32128 , A_=768 , A_=64 , A_=2048 , A_=64 , A_=12 , A_=3 , A_=12 , A_=3 , A_=12 , A_=8 , A_=False , A_=0.01 , A_="float32" , A_=False , A_=32 , A_=128 , A_=0.1 , A_=1e-6 , A_=0.001 , A_=0.001 , A_=1.0 , A_="relu" , A_=True , A_=False , A_=True , A_=0 , A_=1 , **A_ , )-> Optional[Any]: _SCREAMING_SNAKE_CASE = vocab_size _SCREAMING_SNAKE_CASE = d_model _SCREAMING_SNAKE_CASE = d_kv _SCREAMING_SNAKE_CASE = d_ff _SCREAMING_SNAKE_CASE = num_sparse_encoder_layers _SCREAMING_SNAKE_CASE = num_layers _SCREAMING_SNAKE_CASE = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry _SCREAMING_SNAKE_CASE = num_sparse_decoder_layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_encoder_layers > 0: _SCREAMING_SNAKE_CASE = self.num_layers // self.num_sparse_encoder_layers else: _SCREAMING_SNAKE_CASE = self.num_layers # HACK: this will create 0 sparse layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_decoder_layers > 0: _SCREAMING_SNAKE_CASE = self.num_decoder_layers // self.num_sparse_decoder_layers else: _SCREAMING_SNAKE_CASE = self.num_decoder_layers # HACK: this will create 0 sparse layers _SCREAMING_SNAKE_CASE = num_heads _SCREAMING_SNAKE_CASE = num_experts _SCREAMING_SNAKE_CASE = expert_capacity _SCREAMING_SNAKE_CASE = router_bias _SCREAMING_SNAKE_CASE = router_jitter_noise if router_dtype not in ["float32", "float16", "bfloat16"]: raise ValueError(F'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' ) _SCREAMING_SNAKE_CASE = router_dtype _SCREAMING_SNAKE_CASE = router_ignore_padding_tokens _SCREAMING_SNAKE_CASE = relative_attention_num_buckets _SCREAMING_SNAKE_CASE = relative_attention_max_distance _SCREAMING_SNAKE_CASE = dropout_rate _SCREAMING_SNAKE_CASE = layer_norm_epsilon _SCREAMING_SNAKE_CASE = initializer_factor _SCREAMING_SNAKE_CASE = feed_forward_proj _SCREAMING_SNAKE_CASE = use_cache _SCREAMING_SNAKE_CASE = add_router_probs _SCREAMING_SNAKE_CASE = router_z_loss_coef _SCREAMING_SNAKE_CASE = router_aux_loss_coef _SCREAMING_SNAKE_CASE = self.feed_forward_proj.split('-' ) _SCREAMING_SNAKE_CASE = act_info[-1] _SCREAMING_SNAKE_CASE = act_info[0] == '''gated''' if len(UpperCAmelCase__ ) > 1 and act_info[0] != "gated" or len(UpperCAmelCase__ ) > 2: raise ValueError( F'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.''' 'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ' '\'gated-gelu\' or \'relu\'' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": _SCREAMING_SNAKE_CASE = '''gelu_new''' super().__init__( pad_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , is_encoder_decoder=UpperCAmelCase__ , **UpperCAmelCase__ , )
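# A tiny arithmetic sketch of the sparse-layer spacing computed in the config above, assuming the
# same semantics; the variable names and example values here are illustrative, not the config's own.
num_layers, num_sparse_encoder_layers = 12, 3
if num_sparse_encoder_layers > 0:
    encoder_sparse_step = num_layers // num_sparse_encoder_layers  # every 4th encoder layer is sparse
else:
    encoder_sparse_step = num_layers  # effectively no sparse layers
print(encoder_sparse_step)  # 4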
605
'''simple docstring''' import math import flax.linen as nn import jax.numpy as jnp def _lowerCAmelCase ( __magic_name__ : jnp.ndarray , __magic_name__ : int , __magic_name__ : float = 1 , __magic_name__ : float = 1 , __magic_name__ : float = 1.0E4 , __magic_name__ : bool = False , __magic_name__ : float = 1.0 , ) -> jnp.ndarray: assert timesteps.ndim == 1, "Timesteps should be a 1d-array" assert embedding_dim % 2 == 0, f'''Embedding dimension {embedding_dim} should be even''' lowercase : int =float(embedding_dim // 2 ) lowercase : Optional[int] =math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift) lowercase : Any =min_timescale * jnp.exp(jnp.arange(__magic_name__ , dtype=jnp.floataa ) * -log_timescale_increment ) lowercase : List[Any] =jnp.expand_dims(__magic_name__ , 1 ) * jnp.expand_dims(__magic_name__ , 0 ) # scale embeddings lowercase : Tuple =scale * emb if flip_sin_to_cos: lowercase : Dict =jnp.concatenate([jnp.cos(__magic_name__ ), jnp.sin(__magic_name__ )] , axis=1 ) else: lowercase : Any =jnp.concatenate([jnp.sin(__magic_name__ ), jnp.cos(__magic_name__ )] , axis=1 ) lowercase : List[str] =jnp.reshape(__magic_name__ , [jnp.shape(__magic_name__ )[0], embedding_dim] ) return signal class __SCREAMING_SNAKE_CASE ( nn.Module ): lowerCamelCase_ = 32 lowerCamelCase_ = jnp.floataa @nn.compact def __call__( self : Tuple , UpperCAmelCase__ : int ): '''simple docstring''' lowercase : List[Any] =nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_1''' )(UpperCAmelCase__ ) lowercase : Any =nn.silu(UpperCAmelCase__ ) lowercase : int =nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_2''' )(UpperCAmelCase__ ) return temb class __SCREAMING_SNAKE_CASE ( nn.Module ): lowerCamelCase_ = 32 lowerCamelCase_ = False lowerCamelCase_ = 1 @nn.compact def __call__( self : int , UpperCAmelCase__ : str ): '''simple docstring''' return get_sinusoidal_embeddings( UpperCAmelCase__ , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
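# A numpy-only sketch of the sinusoidal timestep embedding built above, assuming an even
# embedding dimension, a frequency shift of 1, and no sin/cos flip or extra scaling.
import numpy as np


def sinusoidal_embedding_sketch(
    timesteps: np.ndarray, embedding_dim: int, min_timescale: float = 1.0, max_timescale: float = 1.0e4
) -> np.ndarray:
    half = embedding_dim // 2
    # geometric progression of timescales between min_timescale and max_timescale
    log_increment = np.log(max_timescale / min_timescale) / (half - 1)
    inv_timescales = min_timescale * np.exp(np.arange(half) * -log_increment)
    args = timesteps[:, None].astype(np.float64) * inv_timescales[None, :]
    return np.concatenate([np.sin(args), np.cos(args)], axis=1)


if __name__ == "__main__":
    print(sinusoidal_embedding_sketch(np.array([0, 10, 100]), 32).shape)  # (3, 32)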
92
0
"""simple docstring""" class lowerCAmelCase__ : def __init__( self ): '''simple docstring''' A__ = '''''' A__ = '''''' A__ = [] def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' if m == -1: return n + 1 elif n == -1: return m + 1 elif self.dp[m][n] > -1: return self.dp[m][n] else: if self.worda[m] == self.worda[n]: A__ = self.__min_dist_top_down_dp(m - 1 , n - 1 ) else: A__ = self.__min_dist_top_down_dp(UpperCAmelCase__ , n - 1 ) A__ = self.__min_dist_top_down_dp(m - 1 , UpperCAmelCase__ ) A__ = self.__min_dist_top_down_dp(m - 1 , n - 1 ) A__ = 1 + min(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) return self.dp[m][n] def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' A__ = worda A__ = worda A__ = [[-1 for _ in range(len(UpperCAmelCase__ ) )] for _ in range(len(UpperCAmelCase__ ) )] return self.__min_dist_top_down_dp(len(UpperCAmelCase__ ) - 1 , len(UpperCAmelCase__ ) - 1 ) def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' A__ = worda A__ = worda A__ = len(UpperCAmelCase__ ) A__ = len(UpperCAmelCase__ ) A__ = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )] for i in range(m + 1 ): for j in range(n + 1 ): if i == 0: # first string is empty A__ = j elif j == 0: # second string is empty A__ = i elif worda[i - 1] == worda[j - 1]: # last characters are equal A__ = self.dp[i - 1][j - 1] else: A__ = self.dp[i][j - 1] A__ = self.dp[i - 1][j] A__ = self.dp[i - 1][j - 1] A__ = 1 + min(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) return self.dp[m][n] if __name__ == "__main__": __UpperCAmelCase =EditDistance() print("""****************** Testing Edit Distance DP Algorithm ******************""") print() __UpperCAmelCase =input("""Enter the first string: """).strip() __UpperCAmelCase =input("""Enter the second string: """).strip() print() print(F'''The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}''') print(F'''The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}''') print() print("""*************** End of Testing Edit Distance DP Algorithm ***************""")
337
'''simple docstring''' from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) # TODO Update this UpperCamelCase_ = { """facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""", # See all ESM models at https://huggingface.co/models?filter=esm } class __SCREAMING_SNAKE_CASE ( lowercase__ ): lowerCamelCase_ = 'esm' def __init__( self : Optional[Any] , UpperCAmelCase__ : str=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : int=768 , UpperCAmelCase__ : Union[str, Any]=12 , UpperCAmelCase__ : List[Any]=12 , UpperCAmelCase__ : Optional[Any]=3072 , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : int=1026 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : Optional[int]=1E-12 , UpperCAmelCase__ : List[Any]="absolute" , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : str=False , UpperCAmelCase__ : int=False , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : List[str]=None , **UpperCAmelCase__ : int , ): '''simple docstring''' super().__init__(pad_token_id=UpperCAmelCase__ , mask_token_id=UpperCAmelCase__ , **UpperCAmelCase__ ) lowercase : Any =vocab_size lowercase : List[Any] =hidden_size lowercase : Any =num_hidden_layers lowercase : Optional[Any] =num_attention_heads lowercase : Tuple =intermediate_size lowercase : int =hidden_dropout_prob lowercase : Dict =attention_probs_dropout_prob lowercase : Optional[int] =max_position_embeddings lowercase : Union[str, Any] =initializer_range lowercase : Tuple =layer_norm_eps lowercase : Union[str, Any] =position_embedding_type lowercase : List[Any] =use_cache lowercase : Dict =emb_layer_norm_before lowercase : Optional[Any] =token_dropout lowercase : Union[str, Any] =is_folding_model if is_folding_model: if esmfold_config is None: logger.info('''No esmfold_config supplied for folding model, using default values.''' ) lowercase : Any =EsmFoldConfig() elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): lowercase : Optional[int] =EsmFoldConfig(**UpperCAmelCase__ ) lowercase : Union[str, Any] =esmfold_config if vocab_list is None: logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' ) lowercase : int =get_default_vocab_list() else: lowercase : Tuple =vocab_list else: lowercase : Union[str, Any] =None lowercase : Dict =None if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , UpperCAmelCase__ ): raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : Union[str, Any] =super().to_dict() if isinstance(self.esmfold_config , UpperCAmelCase__ ): lowercase : Optional[Any] =self.esmfold_config.to_dict() return output @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = None lowerCamelCase_ = True lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = 0 lowerCamelCase_ = True lowerCamelCase_ = False lowerCamelCase_ = 1_28 lowerCamelCase_ = None def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' if self.trunk is None: lowercase : str =TrunkConfig() elif isinstance(self.trunk , UpperCAmelCase__ ): lowercase : int =TrunkConfig(**self.trunk ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : str 
=asdict(self ) lowercase : Union[str, Any] =self.trunk.to_dict() return output @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = 48 lowerCamelCase_ = 10_24 lowerCamelCase_ = 1_28 lowerCamelCase_ = 32 lowerCamelCase_ = 32 lowerCamelCase_ = 32 lowerCamelCase_ = 0 lowerCamelCase_ = 0 lowerCamelCase_ = False lowerCamelCase_ = 4 lowerCamelCase_ = 1_28 lowerCamelCase_ = None def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' if self.structure_module is None: lowercase : Any =StructureModuleConfig() elif isinstance(self.structure_module , UpperCAmelCase__ ): lowercase : Union[str, Any] =StructureModuleConfig(**self.structure_module ) if self.max_recycles <= 0: raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' ) if self.sequence_state_dim % self.sequence_state_dim != 0: raise ValueError( '''`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got''' F''' {self.sequence_state_dim} and {self.sequence_state_dim}.''' ) if self.pairwise_state_dim % self.pairwise_state_dim != 0: raise ValueError( '''`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got''' F''' {self.pairwise_state_dim} and {self.pairwise_state_dim}.''' ) lowercase : str =self.sequence_state_dim // self.sequence_head_width lowercase : int =self.pairwise_state_dim // self.pairwise_head_width if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width: raise ValueError( '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got''' F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' ) if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width: raise ValueError( '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got''' F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' ) if self.pairwise_state_dim % 2 != 0: raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' ) if self.dropout >= 0.4: raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : List[Any] =asdict(self ) lowercase : Any =self.structure_module.to_dict() return output @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = 3_84 lowerCamelCase_ = 1_28 lowerCamelCase_ = 16 lowerCamelCase_ = 1_28 lowerCamelCase_ = 12 lowerCamelCase_ = 4 lowerCamelCase_ = 8 lowerCamelCase_ = 0.1 lowerCamelCase_ = 8 lowerCamelCase_ = 1 lowerCamelCase_ = 2 lowerCamelCase_ = 7 lowerCamelCase_ = 10 lowerCamelCase_ = 1E-8 lowerCamelCase_ = 1E5 def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return asdict(self ) def _lowerCAmelCase ( ) -> Optional[int]: return ( "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>", )
92
0
"""simple docstring""" from abc import ABC, abstractmethod from typing import List, Optional class snake_case ( lowercase__ ): def __init__( self : str)-> Optional[Any]: '''simple docstring''' self.test() def lowercase_ ( self : Tuple)-> Optional[int]: '''simple docstring''' __lowerCAmelCase: int = 0 __lowerCAmelCase: List[Any] = False while not completed: if counter == 1: self.reset() __lowerCAmelCase: Dict = self.advance() if not self.does_advance(UpperCAmelCase__): raise Exception( "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.") __lowerCAmelCase: str = self.update(UpperCAmelCase__) counter += 1 if counter > 1_0_0_0_0: raise Exception("update() does not fulfill the constraint.") if self.remaining() != 0: raise Exception("Custom Constraint is not defined correctly.") @abstractmethod def lowercase_ ( self : Any)-> Any: '''simple docstring''' raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called.") @abstractmethod def lowercase_ ( self : List[str] , UpperCamelCase__ : int)-> List[str]: '''simple docstring''' raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called.") @abstractmethod def lowercase_ ( self : Union[str, Any] , UpperCamelCase__ : int)-> Any: '''simple docstring''' raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called.") @abstractmethod def lowercase_ ( self : Optional[int])-> List[Any]: '''simple docstring''' raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called.") @abstractmethod def lowercase_ ( self : List[str])-> Optional[Any]: '''simple docstring''' raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called.") @abstractmethod def lowercase_ ( self : Optional[int] , UpperCamelCase__ : Optional[Any]=False)-> Dict: '''simple docstring''' raise NotImplementedError( f"{self.__class__} is an abstract class. 
Only classes inheriting this class can be called.") class snake_case ( lowercase__ ): def __init__( self : int , UpperCamelCase__ : List[int])-> Optional[int]: '''simple docstring''' super(UpperCAmelCase__ , self).__init__() if not isinstance(UpperCAmelCase__ , UpperCAmelCase__) or len(UpperCAmelCase__) == 0: raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.") if any((not isinstance(UpperCAmelCase__ , UpperCAmelCase__) or token_id < 0) for token_id in token_ids): raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.") __lowerCAmelCase: Union[str, Any] = token_ids __lowerCAmelCase: Tuple = len(self.token_ids) __lowerCAmelCase: Tuple = -1 # the index of the currently fulfilled step __lowerCAmelCase: int = False def lowercase_ ( self : int)-> List[str]: '''simple docstring''' if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def lowercase_ ( self : str , UpperCamelCase__ : int)-> Dict: '''simple docstring''' if not isinstance(UpperCAmelCase__ , UpperCAmelCase__): raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(UpperCAmelCase__)}") if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def lowercase_ ( self : str , UpperCamelCase__ : int)-> str: '''simple docstring''' if not isinstance(UpperCAmelCase__ , UpperCAmelCase__): raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(UpperCAmelCase__)}") __lowerCAmelCase: Any = False __lowerCAmelCase: Optional[int] = False __lowerCAmelCase: int = False if self.does_advance(UpperCAmelCase__): self.fulfilled_idx += 1 __lowerCAmelCase: Any = True if self.fulfilled_idx == (self.seqlen - 1): __lowerCAmelCase: List[str] = True __lowerCAmelCase: List[Any] = completed else: # failed to make progress. 
__lowerCAmelCase: Union[str, Any] = True self.reset() return stepped, completed, reset def lowercase_ ( self : Any)-> List[Any]: '''simple docstring''' __lowerCAmelCase: int = False __lowerCAmelCase: Tuple = 0 def lowercase_ ( self : Union[str, Any])-> Any: '''simple docstring''' return self.seqlen - (self.fulfilled_idx + 1) def lowercase_ ( self : int , UpperCamelCase__ : int=False)-> Dict: '''simple docstring''' __lowerCAmelCase: Dict = PhrasalConstraint(self.token_ids) if stateful: __lowerCAmelCase: Union[str, Any] = self.seqlen __lowerCAmelCase: Optional[Any] = self.fulfilled_idx __lowerCAmelCase: List[str] = self.completed return new_constraint class snake_case : def __init__( self : Any , UpperCamelCase__ : List[List[int]] , UpperCamelCase__ : Dict=True)-> int: '''simple docstring''' __lowerCAmelCase: str = max([len(UpperCAmelCase__) for one in nested_token_ids]) __lowerCAmelCase: Any = {} for token_ids in nested_token_ids: __lowerCAmelCase: Any = root for tidx, token_id in enumerate(UpperCAmelCase__): if token_id not in level: __lowerCAmelCase: int = {} __lowerCAmelCase: Union[str, Any] = level[token_id] if no_subsets and self.has_subsets(UpperCAmelCase__ , UpperCAmelCase__): raise ValueError( "Each list in `nested_token_ids` can\'t be a complete subset of another list, but is" f" {nested_token_ids}.") __lowerCAmelCase: Dict = root def lowercase_ ( self : int , UpperCamelCase__ : Dict)-> Any: '''simple docstring''' __lowerCAmelCase: Any = self.trie for current_token in current_seq: __lowerCAmelCase: List[str] = start[current_token] __lowerCAmelCase: Optional[Any] = list(start.keys()) return next_tokens def lowercase_ ( self : str , UpperCamelCase__ : List[str])-> Any: '''simple docstring''' __lowerCAmelCase: Union[str, Any] = self.next_tokens(UpperCAmelCase__) return len(UpperCAmelCase__) == 0 def lowercase_ ( self : Optional[Any] , UpperCamelCase__ : Optional[int])-> int: '''simple docstring''' __lowerCAmelCase: Any = list(root.values()) if len(UpperCAmelCase__) == 0: return 1 else: return sum([self.count_leaves(UpperCAmelCase__) for nn in next_nodes]) def lowercase_ ( self : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict)-> Any: '''simple docstring''' __lowerCAmelCase: List[str] = self.count_leaves(UpperCAmelCase__) return len(UpperCAmelCase__) != leaf_count class snake_case ( lowercase__ ): def __init__( self : List[str] , UpperCamelCase__ : List[List[int]])-> Dict: '''simple docstring''' super(UpperCAmelCase__ , self).__init__() if not isinstance(UpperCAmelCase__ , UpperCAmelCase__) or len(UpperCAmelCase__) == 0: raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.") if any(not isinstance(UpperCAmelCase__ , UpperCAmelCase__) for token_ids in nested_token_ids): raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.") if any( any((not isinstance(UpperCAmelCase__ , UpperCAmelCase__) or token_id < 0) for token_id in token_ids) for token_ids in nested_token_ids): raise ValueError( f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.") __lowerCAmelCase: Any = DisjunctiveTrie(UpperCAmelCase__) __lowerCAmelCase: Tuple = nested_token_ids __lowerCAmelCase: Dict = self.trie.max_height __lowerCAmelCase: List[str] = [] __lowerCAmelCase: str = False def lowercase_ ( self : Optional[int])-> Optional[int]: '''simple docstring''' __lowerCAmelCase: Any = self.trie.next_tokens(self.current_seq) if len(UpperCAmelCase__) == 0: return None else: return token_list def 
lowercase_ ( self : int , UpperCamelCase__ : int)-> Optional[int]: '''simple docstring''' if not isinstance(UpperCAmelCase__ , UpperCAmelCase__): raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCAmelCase__)}") __lowerCAmelCase: Union[str, Any] = self.trie.next_tokens(self.current_seq) return token_id in next_tokens def lowercase_ ( self : Tuple , UpperCamelCase__ : int)-> Optional[Any]: '''simple docstring''' if not isinstance(UpperCAmelCase__ , UpperCAmelCase__): raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCAmelCase__)}") __lowerCAmelCase: Any = False __lowerCAmelCase: Any = False __lowerCAmelCase: Optional[Any] = False if self.does_advance(UpperCAmelCase__): self.current_seq.append(UpperCAmelCase__) __lowerCAmelCase: Dict = True else: __lowerCAmelCase: Tuple = True self.reset() __lowerCAmelCase: Union[str, Any] = self.trie.reached_leaf(self.current_seq) __lowerCAmelCase: int = completed return stepped, completed, reset def lowercase_ ( self : Any)-> List[str]: '''simple docstring''' __lowerCAmelCase: int = False __lowerCAmelCase: Tuple = [] def lowercase_ ( self : Dict)-> Any: '''simple docstring''' if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq) def lowercase_ ( self : Dict , UpperCamelCase__ : Any=False)-> int: '''simple docstring''' __lowerCAmelCase: Dict = DisjunctiveConstraint(self.token_ids) if stateful: __lowerCAmelCase: Union[str, Any] = self.seqlen __lowerCAmelCase: int = self.current_seq __lowerCAmelCase: Dict = self.completed return new_constraint class snake_case : def __init__( self : int , UpperCamelCase__ : List[Constraint])-> List[Any]: '''simple docstring''' __lowerCAmelCase: List[str] = constraints # max # of steps required to fulfill a given constraint __lowerCAmelCase: str = max([c.seqlen for c in constraints]) __lowerCAmelCase: Any = len(UpperCAmelCase__) __lowerCAmelCase: Union[str, Any] = False self.init_state() def lowercase_ ( self : str)-> List[Any]: '''simple docstring''' __lowerCAmelCase: List[str] = [] __lowerCAmelCase: Optional[Any] = None __lowerCAmelCase: List[Any] = [constraint.copy(stateful=UpperCAmelCase__) for constraint in self.constraints] def lowercase_ ( self : List[str])-> List[str]: '''simple docstring''' __lowerCAmelCase: List[Any] = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints) * self.max_seqlen) + add def lowercase_ ( self : Union[str, Any])-> Optional[Any]: '''simple docstring''' __lowerCAmelCase: int = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" __lowerCAmelCase: List[str] = constraint.advance() if isinstance(UpperCAmelCase__ , UpperCAmelCase__): token_list.append(UpperCAmelCase__) elif isinstance(UpperCAmelCase__ , UpperCAmelCase__): token_list.extend(UpperCAmelCase__) else: __lowerCAmelCase: str = self.inprogress_constraint.advance() if isinstance(UpperCAmelCase__ , UpperCAmelCase__): token_list.append(UpperCAmelCase__) elif isinstance(UpperCAmelCase__ , UpperCAmelCase__): token_list.extend(UpperCAmelCase__) if len(UpperCAmelCase__) == 0: return None else: return token_list def lowercase_ ( self : Union[str, Any] , UpperCamelCase__ : Optional[List[int]])-> int: '''simple docstring''' self.init_state() if token_ids is not None: for token in 
token_ids: # completes or steps **one** constraint __lowerCAmelCase: Optional[Any] = self.add(UpperCAmelCase__) # the entire list of constraints are fulfilled if self.completed: break def lowercase_ ( self : List[str] , UpperCamelCase__ : int)-> int: '''simple docstring''' if not isinstance(UpperCAmelCase__ , UpperCAmelCase__): raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.") __lowerCAmelCase: List[Any] = False, False if self.completed: __lowerCAmelCase: Tuple = True __lowerCAmelCase: Union[str, Any] = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state __lowerCAmelCase: Union[str, Any] = self.inprogress_constraint.update(UpperCAmelCase__) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=UpperCAmelCase__)) __lowerCAmelCase: List[str] = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. self.complete_constraints.append(self.inprogress_constraint) __lowerCAmelCase: Optional[Any] = None if len(self.pending_constraints) == 0: # we're done! __lowerCAmelCase: int = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints): if pending_constraint.does_advance(UpperCAmelCase__): __lowerCAmelCase: List[Any] = pending_constraint.update(UpperCAmelCase__) if not stepped: raise Exception( "`constraint.update(token_id)` is not yielding incremental progress, " "even though `constraint.does_advance(token_id)` is true.") if complete: self.complete_constraints.append(UpperCAmelCase__) __lowerCAmelCase: Union[str, Any] = None if not complete and stepped: __lowerCAmelCase: Any = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". __lowerCAmelCase: int = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. __lowerCAmelCase: List[Any] = True break # prevent accidentally stepping through multiple constraints with just one token. return complete, stepped def lowercase_ ( self : int , UpperCamelCase__ : Optional[int]=True)-> Union[str, Any]: '''simple docstring''' __lowerCAmelCase: int = ConstraintListState(self.constraints) # we actually never though self.constraints objects # throughout this process. So it's at initialization state. if stateful: __lowerCAmelCase: Tuple = [ constraint.copy(stateful=UpperCAmelCase__) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: __lowerCAmelCase: Dict = self.inprogress_constraint.copy(stateful=UpperCAmelCase__) __lowerCAmelCase: int = [constraint.copy() for constraint in self.pending_constraints] return new_state
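The two classes above (the trie-backed `DisjunctiveConstraint` and the `ConstraintListState` book-keeper) are the machinery behind constrained beam search. A minimal usage sketch, assuming the public `transformers` constrained-generation API (`constraints=` on `generate`) and an illustrative `t5-small` checkpoint that is not taken from this file:

# Sketch only: force the generated text to contain one of several alternatives.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, DisjunctiveConstraint

tokenizer = AutoTokenizer.from_pretrained("t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")

# Either of these token-id sequences must appear somewhere in the output.
alternatives = [
    tokenizer("Regen", add_special_tokens=False).input_ids,
    tokenizer("Schnee", add_special_tokens=False).input_ids,
]
constraint = DisjunctiveConstraint(alternatives)

inputs = tokenizer("translate English to German: It rained yesterday.", return_tensors="pt")
output_ids = model.generate(**inputs, constraints=[constraint], num_beams=4, max_new_tokens=32)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))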
346
'''simple docstring''' # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import doctest import sys import warnings from os.path import abspath, dirname, join import _pytest from transformers.testing_utils import HfDoctestModule, HfDocTestParser # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. UpperCamelCase_ = abspath(join(dirname(__file__), """src""")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action="""ignore""", category=FutureWarning) def _lowerCAmelCase ( __magic_name__ : int ) -> Tuple: config.addinivalue_line( '''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' ) config.addinivalue_line( '''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' ) config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' ) config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' ) config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' ) config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' ) def _lowerCAmelCase ( __magic_name__ : int ) -> Any: from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(__magic_name__ ) def _lowerCAmelCase ( __magic_name__ : Any ) -> Any: from transformers.testing_utils import pytest_terminal_summary_main lowercase : Optional[Any] =terminalreporter.config.getoption('''--make-reports''' ) if make_reports: pytest_terminal_summary_main(__magic_name__ , id=__magic_name__ ) def _lowerCAmelCase ( __magic_name__ : Tuple , __magic_name__ : Union[str, Any] ) -> List[str]: # If no tests are collected, pytest exists with code 5, which makes the CI fail. if exitstatus == 5: lowercase : Optional[int] =0 # Doctest custom flag to ignore output. UpperCamelCase_ = doctest.register_optionflag("""IGNORE_RESULT""") UpperCamelCase_ = doctest.OutputChecker class __SCREAMING_SNAKE_CASE ( lowercase__ ): def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] ): '''simple docstring''' if IGNORE_RESULT & optionflags: return True return OutputChecker.check_output(self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) UpperCamelCase_ = CustomOutputChecker UpperCamelCase_ = HfDoctestModule UpperCamelCase_ = HfDocTestParser
92
0
import numpy as np


def runge_kutta(f, ya, xa, h, x_end):
    """Classic fourth-order Runge-Kutta.

    Integrates dy/dx = f(x, y) from x = xa to x = x_end with step size h,
    starting from y(xa) = ya, and returns the array of successive y values.
    """
    n = int(np.ceil((x_end - xa) / h))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
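A quick sanity check of the integrator above on dy/dx = y (exact solution e^x), using the function name `runge_kutta` from the reconstruction above:

import numpy as np

def growth(x, y):
    # dy/dx = y with y(0) = 1; the exact solution is y(x) = exp(x)
    return y

approx = runge_kutta(growth, ya=1.0, xa=0.0, h=0.01, x_end=1.0)
print(approx[-1], np.exp(1.0))  # both close to 2.71828...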
639
'''simple docstring''' from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging UpperCamelCase_ = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE ( lowercase__ ): lowerCamelCase_ = ['pixel_values'] def __init__( self : List[Any] , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[int, float] = 1 / 255 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : int = 8 , **UpperCAmelCase__ : str , ): '''simple docstring''' super().__init__(**UpperCAmelCase__ ) lowercase : Union[str, Any] =do_rescale lowercase : List[Any] =rescale_factor lowercase : Tuple =do_pad lowercase : List[str] =pad_size def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : float , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Optional[int] ): '''simple docstring''' return rescale(UpperCAmelCase__ , scale=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ ) def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None ): '''simple docstring''' lowercase , lowercase : Union[str, Any] =get_image_size(UpperCAmelCase__ ) lowercase : Tuple =(old_height // size + 1) * size - old_height lowercase : Tuple =(old_width // size + 1) * size - old_width return pad(UpperCAmelCase__ , ((0, pad_height), (0, pad_width)) , mode='''symmetric''' , data_format=UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : ImageInput , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[float] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCAmelCase__ : Optional[Any] , ): '''simple docstring''' lowercase : int =do_rescale if do_rescale is not None else self.do_rescale lowercase : Tuple =rescale_factor if rescale_factor is not None else self.rescale_factor lowercase : int =do_pad if do_pad is not None else self.do_pad lowercase : List[Any] =pad_size if pad_size is not None else self.pad_size lowercase : Any =make_list_of_images(UpperCAmelCase__ ) if not valid_images(UpperCAmelCase__ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) # All transformations expect numpy arrays. lowercase : Dict =[to_numpy_array(UpperCAmelCase__ ) for image in images] if do_rescale: lowercase : Tuple =[self.rescale(image=UpperCAmelCase__ , scale=UpperCAmelCase__ ) for image in images] if do_pad: lowercase : Union[str, Any] =[self.pad(UpperCAmelCase__ , size=UpperCAmelCase__ ) for image in images] lowercase : Dict =[to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images] lowercase : Any ={'''pixel_values''': images} return BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ )
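The padding step above computes, for each spatial dimension, the amount needed to reach the next multiple of `pad_size` (8 by default) and applies symmetric (mirror) padding on the bottom and right edges. The same arithmetic as a standalone numpy sketch, independent of the image-processor class:

import numpy as np

def pad_to_multiple(image, size=8):
    # image: (height, width, channels); mirror-pad the bottom and right edges.
    old_h, old_w = image.shape[:2]
    pad_h = (old_h // size + 1) * size - old_h
    pad_w = (old_w // size + 1) * size - old_w
    return np.pad(image, ((0, pad_h), (0, pad_w), (0, 0)), mode="symmetric")

print(pad_to_multiple(np.zeros((37, 50, 3))).shape)  # (40, 56, 3)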
92
0
import copy import unittest from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, ) from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class lowerCAmelCase_ : '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=2 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=36 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=6 , __UpperCAmelCase=6 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , __UpperCAmelCase=1_000 , ): SCREAMING_SNAKE_CASE_ : int =parent SCREAMING_SNAKE_CASE_ : str =batch_size SCREAMING_SNAKE_CASE_ : Optional[Any] =num_channels SCREAMING_SNAKE_CASE_ : Optional[int] =image_size SCREAMING_SNAKE_CASE_ : Dict =patch_size SCREAMING_SNAKE_CASE_ : Union[str, Any] =text_seq_length SCREAMING_SNAKE_CASE_ : int =is_training SCREAMING_SNAKE_CASE_ : int =use_input_mask SCREAMING_SNAKE_CASE_ : str =use_token_type_ids SCREAMING_SNAKE_CASE_ : str =use_labels SCREAMING_SNAKE_CASE_ : Any =vocab_size SCREAMING_SNAKE_CASE_ : Optional[Any] =hidden_size SCREAMING_SNAKE_CASE_ : str =num_hidden_layers SCREAMING_SNAKE_CASE_ : int =num_attention_heads SCREAMING_SNAKE_CASE_ : Tuple =intermediate_size SCREAMING_SNAKE_CASE_ : Dict =hidden_act SCREAMING_SNAKE_CASE_ : Optional[int] =hidden_dropout_prob SCREAMING_SNAKE_CASE_ : List[Any] =attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ : int =max_position_embeddings SCREAMING_SNAKE_CASE_ : List[Any] =type_vocab_size SCREAMING_SNAKE_CASE_ : Union[str, Any] =type_sequence_label_size SCREAMING_SNAKE_CASE_ : List[Any] =initializer_range SCREAMING_SNAKE_CASE_ : Tuple =coordinate_size SCREAMING_SNAKE_CASE_ : int =shape_size SCREAMING_SNAKE_CASE_ : List[str] =num_labels SCREAMING_SNAKE_CASE_ : Any =num_choices SCREAMING_SNAKE_CASE_ : str =scope SCREAMING_SNAKE_CASE_ : Tuple =range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) SCREAMING_SNAKE_CASE_ : Optional[Any] =text_seq_length SCREAMING_SNAKE_CASE_ : int =(image_size // patch_size) ** 2 + 1 SCREAMING_SNAKE_CASE_ : List[str] =self.text_seq_length + self.image_seq_length def __lowerCamelCase ( self ): SCREAMING_SNAKE_CASE_ : Union[str, Any] =ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE_ : List[str] =ids_tensor([self.batch_size, 
self.text_seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: SCREAMING_SNAKE_CASE_ : Union[str, Any] =bbox[i, j, 3] SCREAMING_SNAKE_CASE_ : Any =bbox[i, j, 1] SCREAMING_SNAKE_CASE_ : List[str] =t if bbox[i, j, 2] < bbox[i, j, 0]: SCREAMING_SNAKE_CASE_ : Dict =bbox[i, j, 2] SCREAMING_SNAKE_CASE_ : Dict =bbox[i, j, 0] SCREAMING_SNAKE_CASE_ : Any =t SCREAMING_SNAKE_CASE_ : Optional[Any] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE_ : str =None if self.use_input_mask: SCREAMING_SNAKE_CASE_ : Optional[Any] =random_attention_mask([self.batch_size, self.text_seq_length] ) SCREAMING_SNAKE_CASE_ : str =None if self.use_token_type_ids: SCREAMING_SNAKE_CASE_ : Optional[int] =ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) SCREAMING_SNAKE_CASE_ : List[Any] =None SCREAMING_SNAKE_CASE_ : List[str] =None if self.use_labels: SCREAMING_SNAKE_CASE_ : List[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE_ : Dict =ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE_ : Union[str, Any] =LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): SCREAMING_SNAKE_CASE_ : Optional[int] =LayoutLMvaModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() # text + image SCREAMING_SNAKE_CASE_ : Union[str, Any] =model(UpperCAmelCase__ , pixel_values=UpperCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Dict =model( UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) SCREAMING_SNAKE_CASE_ : str =model(UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Optional[Any] =model(UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only SCREAMING_SNAKE_CASE_ : Dict =model(UpperCAmelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only SCREAMING_SNAKE_CASE_ : Optional[int] =model(pixel_values=UpperCAmelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): 
SCREAMING_SNAKE_CASE_ : Dict =self.num_labels SCREAMING_SNAKE_CASE_ : Dict =LayoutLMvaForSequenceClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() SCREAMING_SNAKE_CASE_ : Dict =model( UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): SCREAMING_SNAKE_CASE_ : List[Any] =self.num_labels SCREAMING_SNAKE_CASE_ : Any =LayoutLMvaForTokenClassification(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() SCREAMING_SNAKE_CASE_ : List[str] =model( UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): SCREAMING_SNAKE_CASE_ : Tuple =LayoutLMvaForQuestionAnswering(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() SCREAMING_SNAKE_CASE_ : Optional[int] =model( UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCamelCase ( self ): SCREAMING_SNAKE_CASE_ : int =self.prepare_config_and_inputs() ( SCREAMING_SNAKE_CASE_ ) : Dict =config_and_inputs SCREAMING_SNAKE_CASE_ : List[Any] ={ '''input_ids''': input_ids, '''bbox''': bbox, '''pixel_values''': pixel_values, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class lowerCAmelCase_ ( lowercase__ , lowercase__ , unittest.TestCase ): '''simple docstring''' _lowercase = False _lowercase = False _lowercase = False _lowercase = ( ( LayoutLMvaModel, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaForQuestionAnswering, ) if is_torch_available() else () ) _lowercase = ( {'document-question-answering': LayoutLMvaForQuestionAnswering, 'feature-extraction': LayoutLMvaModel} if is_torch_available() else {} ) def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): return True def __lowerCamelCase ( self ): SCREAMING_SNAKE_CASE_ : int =LayoutLMvaModelTester(self ) SCREAMING_SNAKE_CASE_ : List[Any] =ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37 ) def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ): SCREAMING_SNAKE_CASE_ : Optional[int] =copy.deepcopy(UpperCAmelCase__ ) if model_class in get_values(UpperCAmelCase__ ): SCREAMING_SNAKE_CASE_ : Dict ={ k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous() if isinstance(UpperCAmelCase__ , torch.Tensor ) and v.ndim > 1 else v for k, v in inputs_dict.items() } if return_labels: if model_class in 
get_values(UpperCAmelCase__ ): SCREAMING_SNAKE_CASE_ : Union[str, Any] =torch.ones(self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ ) elif model_class in get_values(UpperCAmelCase__ ): SCREAMING_SNAKE_CASE_ : Optional[Any] =torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Tuple =torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ ) elif model_class in [ *get_values(UpperCAmelCase__ ), ]: SCREAMING_SNAKE_CASE_ : Dict =torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ ) elif model_class in [ *get_values(UpperCAmelCase__ ), ]: SCREAMING_SNAKE_CASE_ : Optional[Any] =torch.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=UpperCAmelCase__ , ) return inputs_dict def __lowerCamelCase ( self ): self.config_tester.run_common_tests() def __lowerCamelCase ( self ): SCREAMING_SNAKE_CASE_ : Any =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def __lowerCamelCase ( self ): SCREAMING_SNAKE_CASE_ : Dict =self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: SCREAMING_SNAKE_CASE_ : Union[str, Any] =type self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def __lowerCamelCase ( self ): SCREAMING_SNAKE_CASE_ : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__ ) def __lowerCamelCase ( self ): SCREAMING_SNAKE_CASE_ : Any =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase__ ) def __lowerCamelCase ( self ): SCREAMING_SNAKE_CASE_ : List[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__ ) @slow def __lowerCamelCase ( self ): for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE_ : Union[str, Any] =LayoutLMvaModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def __lowerCamelCase ( self ): return LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase__ ) if is_vision_available() else None @slow def __lowerCamelCase ( self ): SCREAMING_SNAKE_CASE_ : Optional[Any] =LayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' ).to(UpperCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Union[str, Any] =self.default_image_processor SCREAMING_SNAKE_CASE_ : str =prepare_img() SCREAMING_SNAKE_CASE_ : int =image_processor(images=UpperCAmelCase__ , return_tensors='pt' ).pixel_values.to(UpperCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Union[str, Any] =torch.tensor([[1, 2]] ) SCREAMING_SNAKE_CASE_ : Any =torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 ) # forward pass SCREAMING_SNAKE_CASE_ : Tuple =model( input_ids=input_ids.to(UpperCAmelCase__ ) , bbox=bbox.to(UpperCAmelCase__ ) , pixel_values=pixel_values.to(UpperCAmelCase__ ) , ) # verify the logits SCREAMING_SNAKE_CASE_ : str =torch.Size((1, 199, 768) ) self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Optional[int] =torch.tensor( 
[[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]] ).to(UpperCAmelCase__ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase__ , atol=1E-4 ) )
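The coordinate clean-up loop in the model tester above (swapping box corners so that x0 <= x1 and y0 <= y1) can also be written without the nested Python loop; a vectorized numpy equivalent, shown purely as an illustration of what that loop does:

import numpy as np

rng = np.random.default_rng(0)
bbox = rng.integers(0, 1000, size=(2, 7, 4))  # (batch, text_seq_length, 4)

# Sort each (x0, x1) and (y0, y1) pair so the smaller coordinate comes first.
xs = np.sort(bbox[..., [0, 2]], axis=-1)
ys = np.sort(bbox[..., [1, 3]], axis=-1)
legal = np.stack([xs[..., 0], ys[..., 0], xs[..., 1], ys[..., 1]], axis=-1)

assert (legal[..., 2] >= legal[..., 0]).all() and (legal[..., 3] >= legal[..., 1]).all()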
220
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase_ = {"""configuration_mbart""": ["""MBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MBartConfig""", """MBartOnnxConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ["""MBartTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = ["""MBartTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ """MBART_PRETRAINED_MODEL_ARCHIVE_LIST""", """MBartForCausalLM""", """MBartForConditionalGeneration""", """MBartForQuestionAnswering""", """MBartForSequenceClassification""", """MBartModel""", """MBartPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ """TFMBartForConditionalGeneration""", """TFMBartModel""", """TFMBartPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ """FlaxMBartForConditionalGeneration""", """FlaxMBartForQuestionAnswering""", """FlaxMBartForSequenceClassification""", """FlaxMBartModel""", """FlaxMBartPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart import MBartTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart_fast import MBartTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mbart import ( MBART_PRETRAINED_MODEL_ARCHIVE_LIST, MBartForCausalLM, MBartForConditionalGeneration, MBartForQuestionAnswering, MBartForSequenceClassification, MBartModel, MBartPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mbart import ( FlaxMBartForConditionalGeneration, FlaxMBartForQuestionAnswering, FlaxMBartForSequenceClassification, FlaxMBartModel, FlaxMBartPreTrainedModel, ) else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
92
0
from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class SCREAMING_SNAKE_CASE_ : """simple docstring""" def __init__( self :Any, snake_case :Tuple, snake_case :int=13, snake_case :Dict=30, snake_case :Any=2, snake_case :Any=3, snake_case :Any=True, snake_case :List[str]=True, snake_case :str=32, snake_case :Optional[int]=2, snake_case :Dict=4, snake_case :Dict=37, snake_case :List[str]="gelu", snake_case :Union[str, Any]=0.1, snake_case :Any=0.1, snake_case :Union[str, Any]=10, snake_case :Any=0.0_2, snake_case :Dict=3, snake_case :int=None, snake_case :Union[str, Any]=2, ): """simple docstring""" _lowercase =parent _lowercase =batch_size _lowercase =image_size _lowercase =patch_size _lowercase =num_channels _lowercase =is_training _lowercase =use_labels _lowercase =hidden_size _lowercase =num_hidden_layers _lowercase =num_attention_heads _lowercase =intermediate_size _lowercase =hidden_act _lowercase =hidden_dropout_prob _lowercase =attention_probs_dropout_prob _lowercase =type_sequence_label_size _lowercase =initializer_range _lowercase =scope _lowercase =encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) _lowercase =(image_size // patch_size) ** 2 _lowercase =num_patches + 2 def UpperCamelCase__ ( self :List[str]): """simple docstring""" _lowercase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) _lowercase =None if self.use_labels: _lowercase =ids_tensor([self.batch_size], self.type_sequence_label_size) _lowercase =self.get_config() return config, pixel_values, labels def UpperCamelCase__ ( self :Union[str, Any]): """simple docstring""" return DeiTConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=UpperCAmelCase__, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, ) def UpperCamelCase__ ( self :List[str], snake_case :Dict, snake_case :List[Any], snake_case :Any): """simple docstring""" _lowercase =TFDeiTModel(config=UpperCAmelCase__) _lowercase =model(UpperCAmelCase__) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def UpperCamelCase__ ( self :Dict, snake_case :Tuple, snake_case :str, snake_case :Union[str, Any]): """simple docstring""" _lowercase =TFDeiTForMaskedImageModeling(config=UpperCAmelCase__) _lowercase =model(UpperCAmelCase__) self.parent.assertEqual( 
result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)) # test greyscale images _lowercase =1 _lowercase =TFDeiTForMaskedImageModeling(UpperCAmelCase__) _lowercase =floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) _lowercase =model(UpperCAmelCase__) self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size)) def UpperCamelCase__ ( self :Dict, snake_case :Union[str, Any], snake_case :Any, snake_case :List[str]): """simple docstring""" _lowercase =self.type_sequence_label_size _lowercase =TFDeiTForImageClassification(UpperCAmelCase__) _lowercase =model(UpperCAmelCase__, labels=UpperCAmelCase__) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) # test greyscale images _lowercase =1 _lowercase =TFDeiTForImageClassification(UpperCAmelCase__) _lowercase =floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) _lowercase =model(UpperCAmelCase__, labels=UpperCAmelCase__) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def UpperCamelCase__ ( self :Optional[int]): """simple docstring""" _lowercase =self.prepare_config_and_inputs() _lowercase =config_and_inputs _lowercase ={'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class SCREAMING_SNAKE_CASE_ ( lowercase__ , lowercase__ , unittest.TestCase ): """simple docstring""" __lowerCAmelCase : Optional[int] =( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) __lowerCAmelCase : Union[str, Any] =( { '''feature-extraction''': TFDeiTModel, '''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) __lowerCAmelCase : Optional[int] =False __lowerCAmelCase : Dict =False __lowerCAmelCase : Union[str, Any] =False __lowerCAmelCase : int =False def UpperCamelCase__ ( self :str): """simple docstring""" _lowercase =TFDeiTModelTester(self) _lowercase =ConfigTester(self, config_class=UpperCAmelCase__, has_text_modality=UpperCAmelCase__, hidden_size=37) def UpperCamelCase__ ( self :int): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='DeiT does not use inputs_embeds') def UpperCamelCase__ ( self :str): """simple docstring""" pass def UpperCamelCase__ ( self :Tuple): """simple docstring""" _lowercase =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowercase =model_class(UpperCAmelCase__) self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer)) _lowercase =model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCAmelCase__, tf.keras.layers.Dense)) def UpperCamelCase__ ( self :Union[str, Any]): """simple docstring""" _lowercase =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowercase =model_class(UpperCAmelCase__) _lowercase =inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowercase =[*signature.parameters.keys()] _lowercase =['''pixel_values'''] self.assertListEqual(arg_names[:1], UpperCAmelCase__) def UpperCamelCase__ ( self :str): """simple docstring""" _lowercase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__) def UpperCamelCase__ ( self :str): """simple 
docstring""" _lowercase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase__) def UpperCamelCase__ ( self :Dict): """simple docstring""" _lowercase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__) def UpperCamelCase__ ( self :Dict, snake_case :List[Any], snake_case :List[str], snake_case :int=False): """simple docstring""" _lowercase =super()._prepare_for_class(UpperCAmelCase__, UpperCAmelCase__, return_labels=UpperCAmelCase__) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters: del inputs_dict["labels"] return inputs_dict @slow def UpperCamelCase__ ( self :List[str]): """simple docstring""" for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowercase =TFDeiTModel.from_pretrained(UpperCAmelCase__) self.assertIsNotNone(UpperCAmelCase__) def _snake_case () -> Optional[Any]: _lowercase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') return image @require_tf @require_vision class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): """simple docstring""" @cached_property def UpperCamelCase__ ( self :List[Any]): """simple docstring""" return ( DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224') if is_vision_available() else None ) @slow def UpperCamelCase__ ( self :Dict): """simple docstring""" _lowercase =TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224') _lowercase =self.default_image_processor _lowercase =prepare_img() _lowercase =image_processor(images=UpperCAmelCase__, return_tensors='tf') # forward pass _lowercase =model(**UpperCAmelCase__) # verify the logits _lowercase =tf.TensorShape((1, 1000)) self.assertEqual(outputs.logits.shape, UpperCAmelCase__) _lowercase =tf.constant([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1]) self.assertTrue(np.allclose(outputs.logits[0, :3], UpperCAmelCase__, atol=1e-4))
181
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}

if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
92
0
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_albert import AlbertTokenizer else: __UpperCamelCase : Union[str, Any] = None __UpperCamelCase : int = logging.get_logger(__name__) __UpperCamelCase : Tuple = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''} __UpperCamelCase : str = { '''vocab_file''': { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''', }, '''tokenizer_file''': { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''', }, } __UpperCamelCase : List[str] = { '''albert-base-v1''': 512, '''albert-large-v1''': 512, '''albert-xlarge-v1''': 512, '''albert-xxlarge-v1''': 512, '''albert-base-v2''': 512, '''albert-large-v2''': 512, '''albert-xlarge-v2''': 512, '''albert-xxlarge-v2''': 512, } __UpperCamelCase : Optional[Any] = '''▁''' class a ( lowercase__ ): snake_case__ = VOCAB_FILES_NAMES snake_case__ = PRETRAINED_VOCAB_FILES_MAP snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ = AlbertTokenizer def __init__( self , _snake_case=None , _snake_case=None , _snake_case=True , _snake_case=True , _snake_case=False , _snake_case="[CLS]" , _snake_case="[SEP]" , _snake_case="<unk>" , _snake_case="[SEP]" , _snake_case="<pad>" , _snake_case="[CLS]" , _snake_case="[MASK]" , **_snake_case , ): """simple docstring""" lowerCAmelCase = ( AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ , normalized=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else mask_token ) super().__init__( UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , do_lower_case=UpperCAmelCase__ , remove_space=UpperCAmelCase__ , keep_accents=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , 
mask_token=UpperCAmelCase__ , **UpperCAmelCase__ , ) lowerCAmelCase = do_lower_case lowerCAmelCase = remove_space lowerCAmelCase = keep_accents lowerCAmelCase = vocab_file lowerCAmelCase = False if not self.vocab_file else True def UpperCamelCase__ ( self , _snake_case , _snake_case = None ): """simple docstring""" lowerCAmelCase = [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def UpperCamelCase__ ( self , _snake_case , _snake_case = None ): """simple docstring""" lowerCAmelCase = [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCamelCase__ ( self , _snake_case , _snake_case = None ): """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(UpperCAmelCase__ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return lowerCAmelCase = os.path.join( UpperCAmelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ): copyfile(self.vocab_file , UpperCAmelCase__ ) return (out_vocab_file,)
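The two sequence-pair helpers above (the special-token builder and the token-type-id builder) follow the standard ALBERT layout: [CLS] A [SEP] for a single sequence and [CLS] A [SEP] B [SEP] for a pair, with token_type_ids of 0 for the first segment and 1 for the second. A tiny standalone sketch of that layout, using placeholder string tokens rather than real ids:

def albert_pair_layout(tokens_a, tokens_b=None, cls="[CLS]", sep="[SEP]"):
    # Mirrors the two helpers above: build the token sequence and its segment ids.
    if tokens_b is None:
        ids = [cls] + tokens_a + [sep]
        return ids, [0] * len(ids)
    ids = [cls] + tokens_a + [sep] + tokens_b + [sep]
    return ids, [0] * (len(tokens_a) + 2) + [1] * (len(tokens_b) + 1)

print(albert_pair_layout(["he", "llo"], ["world"]))
# (['[CLS]', 'he', 'llo', '[SEP]', 'world', '[SEP]'], [0, 0, 0, 0, 1, 1])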
4
'''simple docstring''' import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process UpperCamelCase_ = logging.getLogger(__name__) def _lowerCAmelCase ( __magic_name__ : List[Any] , __magic_name__ : str ) -> Union[str, Any]: return (preds == labels).mean() @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) lowerCamelCase_ = field( default=lowercase__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) lowerCamelCase_ = field( default=lowercase__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) lowerCamelCase_ = field( default=lowercase__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys() )} ) lowerCamelCase_ = field(metadata={'help': 'Should contain the data files for the task.'} ) lowerCamelCase_ = field( default=1_28 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) lowerCamelCase_ = field( default=lowercase__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) def _lowerCAmelCase ( ) -> int: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. lowercase : Dict =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) lowercase , lowercase , lowercase : List[Any] =parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. 
Use''' ''' --overwrite_output_dir to overcome.''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('''Training/evaluation parameters %s''' , __magic_name__ ) # Set seed set_seed(training_args.seed ) try: lowercase : Any =processors[data_args.task_name]() lowercase : Optional[int] =processor.get_labels() lowercase : str =len(__magic_name__ ) except KeyError: raise ValueError('''Task not found: %s''' % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. lowercase : List[str] =AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__magic_name__ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) lowercase : int =AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) lowercase : Any =AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__magic_name__ , cache_dir=model_args.cache_dir , ) # Get datasets lowercase : int =( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=__magic_name__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) lowercase : Union[str, Any] =( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=__magic_name__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(__magic_name__ : EvalPrediction ) -> Dict: lowercase : Dict =np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(__magic_name__ , p.label_ids )} # Data collator lowercase : List[str] =DataCollatorWithPadding(__magic_name__ , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer lowercase : Dict =Trainer( model=__magic_name__ , args=__magic_name__ , train_dataset=__magic_name__ , eval_dataset=__magic_name__ , compute_metrics=__magic_name__ , data_collator=__magic_name__ , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation lowercase : Optional[Any] ={} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) 
lowercase : List[Any] =trainer.evaluate() lowercase : Any =os.path.join(training_args.output_dir , '''eval_results.txt''' ) if trainer.is_world_master(): with open(__magic_name__ , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key, value in result.items(): logger.info(''' %s = %s''' , __magic_name__ , __magic_name__ ) writer.write('''%s = %s\n''' % (key, value) ) results.update(__magic_name__ ) return results def _lowerCAmelCase ( __magic_name__ : Any ) -> Optional[Any]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
92
0
import itertools import os import random import tempfile import unittest import numpy as np from transformers import TvltFeatureExtractor, is_datasets_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch if is_datasets_available(): from datasets import load_dataset _snake_case = random.Random() def __lowerCamelCase ( _lowercase , _lowercase=1.0 , _lowercase=None , _lowercase=None ) -> Union[str, Any]: if rng is None: UpperCamelCase = global_rng UpperCamelCase = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any]=7 , SCREAMING_SNAKE_CASE__ : Optional[Any]=4_00 , SCREAMING_SNAKE_CASE__ : str=20_00 , SCREAMING_SNAKE_CASE__ : Dict=20_48 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_28 , SCREAMING_SNAKE_CASE__ : List[str]=1 , SCREAMING_SNAKE_CASE__ : List[str]=5_12 , SCREAMING_SNAKE_CASE__ : int=30 , SCREAMING_SNAKE_CASE__ : Dict=4_41_00 , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = min_seq_length UpperCamelCase = max_seq_length UpperCamelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) UpperCamelCase = spectrogram_length UpperCamelCase = feature_size UpperCamelCase = num_audio_channels UpperCamelCase = hop_length UpperCamelCase = chunk_length UpperCamelCase = sampling_rate def __lowerCAmelCase ( self : str ): """simple docstring""" return { "spectrogram_length": self.spectrogram_length, "feature_size": self.feature_size, "num_audio_channels": self.num_audio_channels, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "sampling_rate": self.sampling_rate, } def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False ): """simple docstring""" def _flatten(SCREAMING_SNAKE_CASE__ : Optional[Any] ): return list(itertools.chain(*UpperCAmelCase__ ) ) if equal_length: UpperCamelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size UpperCamelCase = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: UpperCamelCase = [np.asarray(UpperCAmelCase__ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class _lowerCAmelCase ( lowercase__ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict =TvltFeatureExtractor def __lowerCAmelCase ( self : Optional[Any] ): """simple docstring""" UpperCamelCase = TvltFeatureExtractionTester(self ) def __lowerCAmelCase ( self : Tuple ): """simple docstring""" UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(UpperCAmelCase__ , 'spectrogram_length' ) ) self.assertTrue(hasattr(UpperCAmelCase__ , 'feature_size' ) ) self.assertTrue(hasattr(UpperCAmelCase__ , 'num_audio_channels' ) ) self.assertTrue(hasattr(UpperCAmelCase__ , 'hop_length' ) ) self.assertTrue(hasattr(UpperCAmelCase__ , 'chunk_length' ) ) self.assertTrue(hasattr(UpperCAmelCase__ , 
'sampling_rate' ) ) def __lowerCAmelCase ( self : str ): """simple docstring""" UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: UpperCamelCase = feat_extract_first.save_pretrained(UpperCAmelCase__ )[0] check_json_file_has_correct_format(UpperCAmelCase__ ) UpperCamelCase = self.feature_extraction_class.from_pretrained(UpperCAmelCase__ ) UpperCamelCase = feat_extract_first.to_dict() UpperCamelCase = feat_extract_second.to_dict() UpperCamelCase = dict_first.pop('mel_filters' ) UpperCamelCase = dict_second.pop('mel_filters' ) self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ ) ) self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def __lowerCAmelCase ( self : Optional[Any] ): """simple docstring""" UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: UpperCamelCase = os.path.join(UpperCAmelCase__ , 'feat_extract.json' ) feat_extract_first.to_json_file(UpperCAmelCase__ ) UpperCamelCase = self.feature_extraction_class.from_json_file(UpperCAmelCase__ ) UpperCamelCase = feat_extract_first.to_dict() UpperCamelCase = feat_extract_second.to_dict() UpperCamelCase = dict_first.pop('mel_filters' ) UpperCamelCase = dict_second.pop('mel_filters' ) self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ ) ) self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def __lowerCAmelCase ( self : List[str] ): """simple docstring""" UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict ) # create three inputs of length 800, 1000, and 1200 UpperCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] UpperCamelCase = [np.asarray(UpperCAmelCase__ ) for speech_input in speech_inputs] # Test not batched input UpperCamelCase = feature_extractor(np_speech_inputs[0] , return_tensors='np' , sampling_rate=4_41_00 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test batched UpperCamelCase = feature_extractor(UpperCAmelCase__ , return_tensors='np' , sampling_rate=4_41_00 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test audio masking UpperCamelCase = feature_extractor( UpperCAmelCase__ , return_tensors='np' , sampling_rate=4_41_00 , mask_audio=UpperCAmelCase__ ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test 2-D numpy arrays are batched. 
UpperCamelCase = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)] UpperCamelCase = np.asarray(UpperCAmelCase__ ) UpperCamelCase = feature_extractor(UpperCAmelCase__ , return_tensors='np' , sampling_rate=4_41_00 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) def __lowerCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple ): """simple docstring""" UpperCamelCase = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' ) # automatic decoding with librispeech UpperCamelCase = ds.sort('id' ).select(range(UpperCAmelCase__ ) )[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] def __lowerCAmelCase ( self : str ): """simple docstring""" UpperCamelCase = self._load_datasamples(1 ) UpperCamelCase = TvltFeatureExtractor() UpperCamelCase = feature_extractor(UpperCAmelCase__ , return_tensors='pt' ).audio_values self.assertEquals(audio_values.shape , (1, 1, 1_92, 1_28) ) UpperCamelCase = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] ) self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , UpperCAmelCase__ , atol=1e-4 ) )
282
'''simple docstring''' import logging import os from dataclasses import dataclass, field from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import List, Optional import faiss import torch from datasets import Features, Sequence, Value, load_dataset from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser UpperCamelCase_ = logging.getLogger(__name__) torch.set_grad_enabled(False) UpperCamelCase_ = """cuda""" if torch.cuda.is_available() else """cpu""" def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : Tuple=100 , __magic_name__ : Optional[int]=" " ) -> List[str]: lowercase : List[Any] =text.split(__magic_name__ ) return [character.join(text[i : i + n] ).strip() for i in range(0 , len(__magic_name__ ) , __magic_name__ )] def _lowerCAmelCase ( __magic_name__ : dict ) -> dict: lowercase , lowercase : int =[], [] for title, text in zip(documents['''title'''] , documents['''text'''] ): if text is not None: for passage in split_text(__magic_name__ ): titles.append(title if title is not None else '''''' ) texts.append(__magic_name__ ) return {"title": titles, "text": texts} def _lowerCAmelCase ( __magic_name__ : dict , __magic_name__ : DPRContextEncoder , __magic_name__ : DPRContextEncoderTokenizerFast ) -> dict: lowercase : Dict =ctx_tokenizer( documents['''title'''] , documents['''text'''] , truncation=__magic_name__ , padding='''longest''' , return_tensors='''pt''' )['''input_ids'''] lowercase : Optional[int] =ctx_encoder(input_ids.to(device=__magic_name__ ) , return_dict=__magic_name__ ).pooler_output return {"embeddings": embeddings.detach().cpu().numpy()} def _lowerCAmelCase ( __magic_name__ : "RagExampleArguments" , __magic_name__ : "ProcessingArguments" , __magic_name__ : "IndexHnswArguments" , ) -> str: ###################################### logger.info('''Step 1 - Create the dataset''' ) ###################################### # The dataset needed for RAG must have three columns: # - title (string): title of the document # - text (string): text of a passage of the document # - embeddings (array of dimension d): DPR representation of the passage # Let's say you have documents in tab-separated csv files with columns "title" and "text" assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file" # You can load a Dataset object this way lowercase : Tuple =load_dataset( '''csv''' , data_files=[rag_example_args.csv_path] , split='''train''' , delimiter='''\t''' , column_names=['''title''', '''text'''] ) # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files # Then split the documents into passages of 100 words lowercase : Optional[int] =dataset.map(__magic_name__ , batched=__magic_name__ , num_proc=processing_args.num_proc ) # And compute the embeddings lowercase : Any =DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=__magic_name__ ) lowercase : Any =DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ) lowercase : Optional[int] =Features( {'''text''': Value('''string''' ), '''title''': Value('''string''' ), '''embeddings''': Sequence(Value('''float32''' ) )} ) # optional, save as float32 instead of float64 to save space lowercase : Optional[Any] =dataset.map( partial(__magic_name__ , ctx_encoder=__magic_name__ , ctx_tokenizer=__magic_name__ ) , batched=__magic_name__ , 
batch_size=processing_args.batch_size , features=__magic_name__ , ) # And finally save your dataset lowercase : Optional[Any] =os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset''' ) dataset.save_to_disk(__magic_name__ ) # from datasets import load_from_disk # dataset = load_from_disk(passages_path) # to reload the dataset ###################################### logger.info('''Step 2 - Index the dataset''' ) ###################################### # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search lowercase : Union[str, Any] =faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT ) dataset.add_faiss_index('''embeddings''' , custom_index=__magic_name__ ) # And save the index lowercase : Dict =os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset_hnsw_index.faiss''' ) dataset.get_index('''embeddings''' ).save(__magic_name__ ) # dataset.load_faiss_index("embeddings", index_path) # to reload the index @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = field( default=str(Path(lowercase__ ).parent / 'test_run' / 'dummy-kb' / 'my_knowledge_dataset.csv' ) , metadata={'help': 'Path to a tab-separated csv file with columns \'title\' and \'text\''} , ) lowerCamelCase_ = field( default=lowercase__ , metadata={'help': 'Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'} , ) lowerCamelCase_ = field( default='facebook/rag-sequence-nq' , metadata={'help': 'The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''} , ) lowerCamelCase_ = field( default='facebook/dpr-ctx_encoder-multiset-base' , metadata={ 'help': ( 'The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or' ' \'facebook/dpr-ctx_encoder-multiset-base\'' ) } , ) lowerCamelCase_ = field( default=str(Path(lowercase__ ).parent / 'test_run' / 'dummy-kb' ) , metadata={'help': 'Path to a directory where the dataset passages and the index will be saved'} , ) @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = field( default=lowercase__ , metadata={ 'help': 'The number of processes to use to split the documents into passages. Default is single process.' } , ) lowerCamelCase_ = field( default=16 , metadata={ 'help': 'The batch size to use when computing the passages embeddings using the DPR context encoder.' } , ) @dataclass class __SCREAMING_SNAKE_CASE : lowerCamelCase_ = field( default=7_68 , metadata={'help': 'The dimension of the embeddings to pass to the HNSW Faiss index.'} , ) lowerCamelCase_ = field( default=1_28 , metadata={ 'help': ( 'The number of bi-directional links created for every new element during the HNSW index construction.' ) } , ) if __name__ == "__main__": logging.basicConfig(level=logging.WARNING) logger.setLevel(logging.INFO) UpperCamelCase_ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments)) UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = parser.parse_args_into_dataclasses() with TemporaryDirectory() as tmp_dir: UpperCamelCase_ = rag_example_args.output_dir or tmp_dir main(rag_example_args, processing_args, index_hnsw_args)
92
0
"""simple docstring""" import os try: from .build_directory_md import good_file_paths except ImportError: from build_directory_md import good_file_paths # type: ignore __magic_name__ = list(good_file_paths()) assert filepaths, "good_file_paths() failed!" __magic_name__ = [file for file in filepaths if file != file.lower()] if upper_files: print(F'{len(upper_files)} files contain uppercase characters:') print("""\n""".join(upper_files) + """\n""") __magic_name__ = [file for file in filepaths if """ """ in file] if space_files: print(F'{len(space_files)} files contain space characters:') print("""\n""".join(space_files) + """\n""") __magic_name__ = [file for file in filepaths if """-""" in file] if hyphen_files: print(F'{len(hyphen_files)} files contain hyphen characters:') print("""\n""".join(hyphen_files) + """\n""") __magic_name__ = [file for file in filepaths if os.sep not in file] if nodir_files: print(F'{len(nodir_files)} files are not in a directory:') print("""\n""".join(nodir_files) + """\n""") __magic_name__ = len(upper_files + space_files + hyphen_files + nodir_files) if bad_files: import sys sys.exit(bad_files)
129
'''simple docstring''' import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import MaMaaaTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from transformers.utils import is_sentencepiece_available if is_sentencepiece_available(): from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin if is_sentencepiece_available(): UpperCamelCase_ = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right UpperCamelCase_ = 128022 UpperCamelCase_ = 128028 @require_sentencepiece class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ): lowerCamelCase_ = MaMaaaTokenizer lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = True def lowerCamelCase_ ( self : Dict ): '''simple docstring''' super().setUp() lowercase : Dict =['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>'''] lowercase : List[Any] =dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) ) lowercase : List[Any] =Path(self.tmpdirname ) save_json(UpperCAmelCase__ , save_dir / VOCAB_FILES_NAMES['''vocab_file'''] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(UpperCAmelCase__ , save_dir / VOCAB_FILES_NAMES['''spm_file'''] ) lowercase : Tuple =MaMaaaTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase_ ( self : Any , **UpperCAmelCase__ : int ): '''simple docstring''' return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Dict ): '''simple docstring''' return ( "This is a test", "This is a test", ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : Tuple ='''</s>''' lowercase : Union[str, Any] =0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' lowercase : List[Any] =self.get_tokenizer() lowercase : Optional[Any] =list(tokenizer.get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''</s>''' ) self.assertEqual(vocab_keys[1] , '''<unk>''' ) self.assertEqual(vocab_keys[-1] , '''<s>''' ) self.assertEqual(len(UpperCAmelCase__ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) ) @unittest.skip('''Skip this test while all models are still to be uploaded.''' ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' pass def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : Union[str, Any] =self.get_tokenizer() lowercase : str =tokenizer.tokenize('''This is a test''' ) self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [2, 3, 4, 5, 6] , ) lowercase : Optional[int] =tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] ) self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) lowercase : Tuple =tokenizer.convert_tokens_to_string(UpperCAmelCase__ ) self.assertEqual(UpperCAmelCase__ , '''This is a test''' ) @slow def lowerCamelCase_ ( self 
: List[str] ): '''simple docstring''' # fmt: off lowercase : int ={'''input_ids''': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase__ , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , ) @require_torch @require_sentencepiece @require_tokenizers class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): lowerCamelCase_ = 'facebook/m2m100_418M' lowerCamelCase_ = [ 'In my opinion, there are two levels of response from the French government.', 'NSA Affair Emphasizes Complete Lack of Debate on Intelligence', ] lowerCamelCase_ = [ 'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.', 'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement', ] # fmt: off lowerCamelCase_ = [EN_CODE, 5_93, 19_49, 11_57_81, 4, 7_15_86, 42_34, 6_06_33, 12_62_33, 4_32, 12_38_08, 1_55_92, 11_97, 11_71_32, 12_06_18, 5, 2] @classmethod def lowerCamelCase_ ( cls : Optional[Any] ): '''simple docstring''' lowercase : MaMaaaTokenizer =MaMaaaTokenizer.from_pretrained( cls.checkpoint_name , 
src_lang='''en''' , tgt_lang='''fr''' ) lowercase : Optional[int] =1 return cls def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' self.assertEqual(self.tokenizer.get_lang_id('''ar''' ) , 128006 ) self.assertEqual(self.tokenizer.get_lang_id('''en''' ) , 128022 ) self.assertEqual(self.tokenizer.get_lang_id('''ro''' ) , 128076 ) self.assertEqual(self.tokenizer.get_lang_id('''mr''' ) , 128063 ) def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : List[str] =self.tokenizer.get_vocab() self.assertEqual(len(UpperCAmelCase__ ) , self.tokenizer.vocab_size ) self.assertEqual(vocab['''<unk>'''] , 3 ) self.assertIn(self.tokenizer.get_lang_token('''en''' ) , UpperCAmelCase__ ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : List[Any] ='''en''' lowercase : Optional[Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , UpperCAmelCase__ ) def lowerCamelCase_ ( self : int ): '''simple docstring''' self.assertIn(UpperCAmelCase__ , self.tokenizer.all_special_ids ) # fmt: off lowercase : str =[FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2] # fmt: on lowercase : Optional[Any] =self.tokenizer.decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ ) lowercase : Optional[Any] =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCAmelCase__ ) self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertNotIn(self.tokenizer.eos_token , UpperCAmelCase__ ) def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : Any =tempfile.mkdtemp() lowercase : Tuple =self.tokenizer.lang_token_to_id self.tokenizer.save_pretrained(UpperCAmelCase__ ) lowercase : Union[str, Any] =MaMaaaTokenizer.from_pretrained(UpperCAmelCase__ ) self.assertDictEqual(new_tok.lang_token_to_id , UpperCAmelCase__ ) @require_torch def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' lowercase : List[str] ='''en''' lowercase : int ='''fr''' lowercase : Union[str, Any] =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase__ , return_tensors='''pt''' ) lowercase : str =shift_tokens_right( batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id ) for k in batch: lowercase : int =batch[k].tolist() # batch = {k: v.tolist() for k,v in batch.items()} # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 # batch.decoder_inputs_ids[0][0] == assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == FR_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2] == [2, FR_CODE] @require_torch def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : Optional[int] ='''mr''' self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) lowercase : Union[str, Any] ='''zh''' self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) @require_torch def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : int ='''mr''' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] ) self.assertListEqual(self.tokenizer.suffix_tokens , 
[self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) lowercase : Optional[Any] ='''zh''' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) @require_torch def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : Optional[Any] =self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''' ) self.assertEqual( nested_simplify(UpperCAmelCase__ ) , { # en_XX, A, test, EOS '''input_ids''': [[128022, 58, 4183, 2]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 128006, } , )
92
0
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetrImageProcessor class _lowerCAmelCase ( unittest.TestCase ): def __init__( self , _UpperCamelCase , _UpperCamelCase=7 , _UpperCamelCase=3 , _UpperCamelCase=30 , _UpperCamelCase=400 , _UpperCamelCase=True , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase=1 / 255 , _UpperCamelCase=True , _UpperCamelCase=[0.5, 0.5, 0.5] , _UpperCamelCase=[0.5, 0.5, 0.5] , _UpperCamelCase=True , ) -> int: lowerCAmelCase_ = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1_333} lowerCAmelCase_ = parent lowerCAmelCase_ = batch_size lowerCAmelCase_ = num_channels lowerCAmelCase_ = min_resolution lowerCAmelCase_ = max_resolution lowerCAmelCase_ = do_resize lowerCAmelCase_ = size lowerCAmelCase_ = do_rescale lowerCAmelCase_ = rescale_factor lowerCAmelCase_ = do_normalize lowerCAmelCase_ = image_mean lowerCAmelCase_ = image_std lowerCAmelCase_ = do_pad def __a ( self ) -> Any: return { "do_resize": self.do_resize, "size": self.size, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_pad": self.do_pad, } def __a ( self , _UpperCamelCase , _UpperCamelCase=False ) -> List[Any]: if not batched: lowerCAmelCase_ = image_inputs[0] if isinstance(UpperCAmelCase__ , Image.Image ): lowerCAmelCase_ = image.size else: lowerCAmelCase_ = image.shape[1], image.shape[2] if w < h: lowerCAmelCase_ = int(self.size["shortest_edge"] * h / w ) lowerCAmelCase_ = self.size['''shortest_edge'''] elif w > h: lowerCAmelCase_ = self.size['''shortest_edge'''] lowerCAmelCase_ = int(self.size["shortest_edge"] * w / h ) else: lowerCAmelCase_ = self.size['''shortest_edge'''] lowerCAmelCase_ = self.size['''shortest_edge'''] else: lowerCAmelCase_ = [] for image in image_inputs: lowerCAmelCase_ = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) lowerCAmelCase_ = max(UpperCAmelCase__ , key=lambda _UpperCamelCase : item[0] )[0] lowerCAmelCase_ = max(UpperCAmelCase__ , key=lambda _UpperCamelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class _lowerCAmelCase ( lowercase__ , unittest.TestCase ): _lowercase =DetrImageProcessor if is_vision_available() else None def __a ( self ) -> Tuple: lowerCAmelCase_ = DetrImageProcessingTester(self ) @property def __a ( self ) -> Tuple: return self.image_processor_tester.prepare_image_processor_dict() def __a ( self ) -> List[str]: lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCAmelCase__ , "image_mean" ) ) self.assertTrue(hasattr(UpperCAmelCase__ , "image_std" ) ) self.assertTrue(hasattr(UpperCAmelCase__ , "do_normalize" ) ) self.assertTrue(hasattr(UpperCAmelCase__ , "do_rescale" ) ) self.assertTrue(hasattr(UpperCAmelCase__ , "rescale_factor" ) ) self.assertTrue(hasattr(UpperCAmelCase__ , "do_resize" ) ) self.assertTrue(hasattr(UpperCAmelCase__ , "size" ) ) self.assertTrue(hasattr(UpperCAmelCase__ , "do_pad" ) ) def __a ( self ) -> List[str]: lowerCAmelCase_ = 
self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1_333} ) self.assertEqual(image_processor.do_pad , UpperCAmelCase__ ) lowerCAmelCase_ = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCAmelCase__ ) self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} ) self.assertEqual(image_processor.do_pad , UpperCAmelCase__ ) def __a ( self ) -> Optional[Any]: pass def __a ( self ) -> List[Any]: lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , Image.Image ) # Test not batched input lowerCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values lowerCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ ) lowerCAmelCase_ = image_processing(UpperCAmelCase__ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __a ( self ) -> Optional[int]: lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , np.ndarray ) # Test not batched input lowerCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values lowerCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCAmelCase_ = image_processing(UpperCAmelCase__ , return_tensors="pt" ).pixel_values lowerCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __a ( self ) -> Tuple: lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , torch.Tensor ) # Test not batched input lowerCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values lowerCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCAmelCase_ = image_processing(UpperCAmelCase__ , return_tensors="pt" ).pixel_values lowerCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ ) self.assertEqual( 
encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def __a ( self ) -> Dict: lowerCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f: lowerCAmelCase_ = json.loads(f.read() ) lowerCAmelCase_ = {'''image_id''': 39_769, '''annotations''': target} # encode them lowerCAmelCase_ = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50" ) lowerCAmelCase_ = image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , return_tensors="pt" ) # verify pixel values lowerCAmelCase_ = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding["pixel_values"].shape , UpperCAmelCase__ ) lowerCAmelCase_ = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , UpperCAmelCase__ , atol=1e-4 ) ) # verify area lowerCAmelCase_ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , UpperCAmelCase__ ) ) # verify boxes lowerCAmelCase_ = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , UpperCAmelCase__ ) lowerCAmelCase_ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , UpperCAmelCase__ , atol=1e-3 ) ) # verify image_id lowerCAmelCase_ = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , UpperCAmelCase__ ) ) # verify is_crowd lowerCAmelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , UpperCAmelCase__ ) ) # verify class_labels lowerCAmelCase_ = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , UpperCAmelCase__ ) ) # verify orig_size lowerCAmelCase_ = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , UpperCAmelCase__ ) ) # verify size lowerCAmelCase_ = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , UpperCAmelCase__ ) ) @slow def __a ( self ) -> Optional[int]: lowerCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f: lowerCAmelCase_ = json.loads(f.read() ) lowerCAmelCase_ = {'''file_name''': '''000000039769.png''', '''image_id''': 39_769, '''segments_info''': target} lowerCAmelCase_ = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" ) # encode them lowerCAmelCase_ = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic" ) lowerCAmelCase_ = image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , masks_path=UpperCAmelCase__ , return_tensors="pt" ) # verify pixel values lowerCAmelCase_ = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding["pixel_values"].shape , UpperCAmelCase__ ) lowerCAmelCase_ = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , UpperCAmelCase__ , atol=1e-4 ) ) # verify area lowerCAmelCase_ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , UpperCAmelCase__ ) ) # verify boxes lowerCAmelCase_ = torch.Size([6, 4] ) 
self.assertEqual(encoding["labels"][0]["boxes"].shape , UpperCAmelCase__ ) lowerCAmelCase_ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , UpperCAmelCase__ , atol=1e-3 ) ) # verify image_id lowerCAmelCase_ = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , UpperCAmelCase__ ) ) # verify is_crowd lowerCAmelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , UpperCAmelCase__ ) ) # verify class_labels lowerCAmelCase_ = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , UpperCAmelCase__ ) ) # verify masks lowerCAmelCase_ = 822_873 self.assertEqual(encoding["labels"][0]["masks"].sum().item() , UpperCAmelCase__ ) # verify orig_size lowerCAmelCase_ = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , UpperCAmelCase__ ) ) # verify size lowerCAmelCase_ = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , UpperCAmelCase__ ) )
290
'''Project Euler Problem 3: find the largest prime factor of a given number.'''


def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n using trial division."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        # advance i to the next divisor of n
        while n % i != 0:
            i += 1
        ans = i
        # strip every power of that divisor out of n
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
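A quick sanity check of the function above (an illustrative snippet, not part of the original file; the expected values come from the Project Euler problem statement and its published answer):

# 13195 = 5 * 7 * 13 * 29, so the largest prime factor is 29
assert solution(13195) == 29
# the default input used by the problem
assert solution(600851475143) == 6857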
92
0
'''simple docstring''' import numpy as np import pandas as pd from sklearn.preprocessing import MinMaxScaler from tensorflow.keras.layers import LSTM, Dense from tensorflow.keras.models import Sequential if __name__ == "__main__": A__ : Tuple = pd.read_csv('''sample_data.csv''', header=None) A__ : Dict = df.shape[:1][0] # If you're using some other dataset input the target column A__ : List[Any] = df.iloc[:, 1:2] A__ : str = actual_data.values.reshape(len_data, 1) A__ : str = MinMaxScaler().fit_transform(actual_data) A__ : Dict = 1_0 A__ : Any = 5 A__ : int = 2_0 A__ : Any = len_data - periods * look_back A__ : Optional[int] = actual_data[:division] A__ : Tuple = actual_data[division - look_back :] A__ , A__ : Dict = [], [] A__ , A__ : Any = [], [] for i in range(0, len(train_data) - forward_days - look_back + 1): train_x.append(train_data[i : i + look_back]) train_y.append(train_data[i + look_back : i + look_back + forward_days]) for i in range(0, len(test_data) - forward_days - look_back + 1): test_x.append(test_data[i : i + look_back]) test_y.append(test_data[i + look_back : i + look_back + forward_days]) A__ : Optional[int] = np.array(train_x) A__ : int = np.array(test_x) A__ : List[str] = np.array([list(i.ravel()) for i in train_y]) A__ : Optional[Any] = np.array([list(i.ravel()) for i in test_y]) A__ : str = Sequential() model.add(LSTM(1_2_8, input_shape=(look_back, 1), return_sequences=True)) model.add(LSTM(6_4, input_shape=(1_2_8, 1))) model.add(Dense(forward_days)) model.compile(loss='''mean_squared_error''', optimizer='''adam''') A__ : Optional[Any] = model.fit( x_train, y_train, epochs=1_5_0, verbose=1, shuffle=True, batch_size=4 ) A__ : Any = model.predict(x_test)
286
'''Speech2Text2 decoder configuration.'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}


class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
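For reference, a minimal usage sketch of the configuration class above (illustrative only, assuming the class is exposed as transformers.Speech2Text2Config, which is how the library ships it):

from transformers import Speech2Text2Config

# defaults mirror the small decoder used with facebook/s2t-wav2vec2-large-en-de
config = Speech2Text2Config()
print(config.d_model, config.decoder_layers)  # 256 6

# individual fields can be overridden at construction time
small_config = Speech2Text2Config(decoder_layers=2, d_model=128)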
92
0
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    """Convert an image to its negative by inverting every pixel's color in place."""
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    img = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)

    waitKey(0)
    destroyAllWindows()
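A note on the design: the per-pixel Python loop above is easy to read but slow on large images. A vectorized NumPy equivalent (an illustrative alternative, not part of the original script) performs the same inversion in one expression:

import numpy as np


def convert_to_negative_vectorized(img: np.ndarray) -> np.ndarray:
    # 255 - value flips each channel of every pixel at once
    return 255 - img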
605
'''simple docstring''' import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __SCREAMING_SNAKE_CASE : def __init__( self : str , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str]=13 , UpperCAmelCase__ : Any=32 , UpperCAmelCase__ : List[str]=3 , UpperCAmelCase__ : Any=4 , UpperCAmelCase__ : List[str]=[10, 20, 30, 40] , UpperCAmelCase__ : Any=[2, 2, 3, 2] , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Optional[Any]=37 , UpperCAmelCase__ : Union[str, Any]="gelu" , UpperCAmelCase__ : Optional[Any]=10 , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : Optional[int]=["stage2", "stage3", "stage4"] , UpperCAmelCase__ : Dict=[2, 3, 4] , UpperCAmelCase__ : Optional[int]=None , ): '''simple docstring''' lowercase : List[Any] =parent lowercase : Tuple =batch_size lowercase : List[str] =image_size lowercase : List[Any] =num_channels lowercase : Union[str, Any] =num_stages lowercase : int =hidden_sizes lowercase : Any =depths lowercase : Tuple =is_training lowercase : str =use_labels lowercase : List[Any] =intermediate_size lowercase : int =hidden_act lowercase : Union[str, Any] =num_labels lowercase : Optional[int] =initializer_range lowercase : int =out_features lowercase : List[str] =out_indices lowercase : str =scope def lowerCamelCase_ ( self : int ): '''simple docstring''' lowercase : str =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase : Dict =None if self.use_labels: lowercase : List[Any] =ids_tensor([self.batch_size] , self.num_labels ) lowercase : Dict =self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self : Any ): '''simple docstring''' return ConvNextVaConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[Any] ): '''simple docstring''' lowercase : Dict =ConvNextVaModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : Optional[Any] =model(UpperCAmelCase__ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int ): 
'''simple docstring''' lowercase : Dict =ConvNextVaForImageClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : str =model(UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int ): '''simple docstring''' lowercase : Union[str, Any] =ConvNextVaBackbone(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : Optional[int] =model(UpperCAmelCase__ ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None lowercase : Optional[Any] =None lowercase : str =ConvNextVaBackbone(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() lowercase : Optional[Any] =model(UpperCAmelCase__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowercase : Any =self.prepare_config_and_inputs() lowercase , lowercase , lowercase : str =config_and_inputs lowercase : Any ={'''pixel_values''': pixel_values} return config, inputs_dict def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : str =self.prepare_config_and_inputs() lowercase , lowercase , lowercase : List[str] =config_and_inputs lowercase : Optional[Any] ={'''pixel_values''': pixel_values, '''labels''': labels} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ): lowerCamelCase_ = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) lowerCamelCase_ = ( {'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification} if is_torch_available() else {} ) lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' lowercase : Dict =ConvNextVaModelTester(self ) lowercase : str =ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37 ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase_ ( self : Any ): '''simple docstring''' return @unittest.skip(reason='''ConvNextV2 does not use inputs_embeds''' ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' pass 
@unittest.skip(reason='''ConvNextV2 does not support input and output embeddings''' ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' pass @unittest.skip(reason='''ConvNextV2 does not use feedforward chunking''' ) def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' pass def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' if not self.model_tester.is_training: return for model_class in self.all_model_classes: lowercase , lowercase : Union[str, Any] =self.model_tester.prepare_config_and_inputs_with_labels() lowercase : Optional[int] =True if model_class.__name__ in [ *get_values(UpperCAmelCase__ ), *get_values(UpperCAmelCase__ ), ]: continue lowercase : Dict =model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.train() lowercase : Optional[Any] =self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ ) lowercase : List[Any] =model(**UpperCAmelCase__ ).loss loss.backward() def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' if not self.model_tester.is_training: return for model_class in self.all_model_classes: lowercase , lowercase : Any =self.model_tester.prepare_config_and_inputs_with_labels() lowercase : List[Any] =False lowercase : Any =True if ( model_class.__name__ in [*get_values(UpperCAmelCase__ ), *get_values(UpperCAmelCase__ )] or not model_class.supports_gradient_checkpointing ): continue lowercase : Any =model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.gradient_checkpointing_enable() model.train() lowercase : Optional[Any] =self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ ) lowercase : int =model(**UpperCAmelCase__ ).loss loss.backward() def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowercase , lowercase : int =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase : Dict =model_class(UpperCAmelCase__ ) lowercase : Union[str, Any] =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase : int =[*signature.parameters.keys()] lowercase : Optional[Any] =['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCAmelCase__ ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' lowercase : Tuple =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def lowerCamelCase_ ( self : int ): '''simple docstring''' def check_hidden_states_output(UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple ): lowercase : int =model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() with torch.no_grad(): lowercase : Any =model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) ) lowercase : Dict =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowercase : List[Any] =self.model_tester.num_stages self.assertEqual(len(UpperCAmelCase__ ) , expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowercase , lowercase : List[str] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase : List[str] =True check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) # check that 
output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase : Tuple =True check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' lowercase : str =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ ) @slow def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase : List[Any] =ConvNextVaModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) def _lowerCAmelCase ( ) -> List[Any]: lowercase : Union[str, Any] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' return AutoImageProcessor.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ) if is_vision_available() else None @slow def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' lowercase : Tuple =ConvNextVaForImageClassification.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ).to(UpperCAmelCase__ ) lowercase : int =self.default_image_processor lowercase : List[str] =prepare_img() lowercase : List[Any] =preprocessor(images=UpperCAmelCase__ , return_tensors='''pt''' ).to(UpperCAmelCase__ ) # forward pass with torch.no_grad(): lowercase : Dict =model(**UpperCAmelCase__ ) # verify the logits lowercase : Optional[Any] =torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , UpperCAmelCase__ ) lowercase : Tuple =torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(UpperCAmelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1E-4 ) )
92
0
"""simple docstring""" import fire from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoTokenizer from utils import SeqaSeqDataset, pickle_save def __a ( A , A , A=1_024 , A=1_024 , A=False , **A ) -> str: '''simple docstring''' A__ = AutoTokenizer.from_pretrained(A ) A__ = SeqaSeqDataset(A , A , A , A , type_path="train" , **A ) A__ = tok.pad_token_id def get_lens(A ): A__ = tqdm( DataLoader(A , batch_size=512 , num_workers=8 , shuffle=A , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , ) A__ = [] for batch in dl: A__ = batch['''input_ids'''].ne(A ).sum(1 ).tolist() A__ = batch['''labels'''].ne(A ).sum(1 ).tolist() if consider_target: for src, tgt in zip(A , A ): max_lens.append(max(A , A ) ) else: max_lens.extend(A ) return max_lens A__ = get_lens(A ) A__ = SeqaSeqDataset(A , A , A , A , type_path="val" , **A ) A__ = get_lens(A ) pickle_save(A , train_ds.len_file ) pickle_save(A , val_ds.len_file ) if __name__ == "__main__": fire.Fire(save_len_file)
337
'''simple docstring''' import re from flax.core.frozen_dict import freeze from flax.traverse_util import flatten_dict, unflatten_dict from jax.experimental import PartitionSpec as P # Sentinels UpperCamelCase_ = object() # For specifying empty leaf dict `{}` UpperCamelCase_ = object() def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : int ) -> Optional[int]: lowercase : Optional[Any] =tuple((re.compile(x + '''$''' ) for x in qs) ) for i in range(len(__magic_name__ ) - len(__magic_name__ ) + 1 ): lowercase : Union[str, Any] =[x.match(__magic_name__ ) for x, y in zip(__magic_name__ , ks[i:] )] if matches and all(__magic_name__ ): return True return False def _lowerCAmelCase ( __magic_name__ : Dict ) -> List[str]: def replace(__magic_name__ : Optional[int] , __magic_name__ : Optional[Any] ): for rule, replacement in rules: if _match(__magic_name__ , __magic_name__ ): return replacement return val return replace def _lowerCAmelCase ( ) -> int: return [ # embeddings (("transformer", "wpe", "embedding"), P('''mp''' , __magic_name__ )), (("transformer", "wte", "embedding"), P('''mp''' , __magic_name__ )), # atention (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__magic_name__ , '''mp''' )), (("attention", "out_proj", "kernel"), P('''mp''' , __magic_name__ )), (("attention", "out_proj", "bias"), None), # mlp (("mlp", "c_fc", "kernel"), P(__magic_name__ , '''mp''' )), (("mlp", "c_fc", "bias"), P('''mp''' )), (("mlp", "c_proj", "kernel"), P('''mp''' , __magic_name__ )), (("mlp", "c_proj", "bias"), None), # layer norms ((r"ln_\d+", "bias"), None), ((r"\d+", r"ln_\d+", "scale"), None), (("ln_f", "bias"), None), (("ln_f", "scale"), None), ] def _lowerCAmelCase ( __magic_name__ : str ) -> int: lowercase : int =_get_partition_rules() lowercase : Tuple =_replacement_rules(__magic_name__ ) lowercase : Any ={k: _unmatched for k in flatten_dict(__magic_name__ )} lowercase : Any ={k: replace(__magic_name__ , __magic_name__ ) for k, v in initd.items()} assert _unmatched not in result.values(), "Incomplete partition spec." return freeze(unflatten_dict(__magic_name__ ) )
92
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available __A = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["GPTSw3Tokenizer"] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt_swa import GPTSwaTokenizer else: import sys __A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
346
'''Even Tree problem: count how many edges can be removed so that every remaining component has an even number of nodes.'''
from collections import defaultdict


def dfs(start: int) -> int:
    """Return the size of the subtree rooted at start, recording nodes whose subtree size is even."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    # an even-sized subtree can be cut off from its parent edge
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited = {}
    cuts = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    # the root's own subtree is the whole (even-sized) tree, so subtract 1;
    # for this sample tree, two edges can be removed
    print(len(cuts) - 1)
92
0
from math import ceil


def solution(n: int = 1001) -> int:
    """Return the sum of the numbers on the diagonals of an n x n number spiral (Project Euler Problem 28)."""
    total = 1

    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        # the four corners of ring i are odd**2, odd**2 - even, odd**2 - 2*even and odd**2 - 3*even
        total = total + 4 * odd**2 - 6 * even

    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
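A small worked check of the corner formula above (an illustrative snippet, using the 5 x 5 example from the Project Euler problem statement): the diagonals of a 5 x 5 spiral hold 1, 3, 5, 7, 9, 13, 17, 21 and 25, which sum to 101.

assert solution(5) == 101  # matches the value quoted in the problem statement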
639
'''simple docstring''' import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging UpperCamelCase_ = logging.get_logger(__name__) def _lowerCAmelCase ( __magic_name__ : List[str] ) -> Dict: lowercase : List[str] =R'''\w+[.]\d+''' lowercase : List[str] =re.findall(__magic_name__ , __magic_name__ ) for pat in pats: lowercase : Optional[int] =key.replace(__magic_name__ , '''_'''.join(pat.split('''.''' ) ) ) return key def _lowerCAmelCase ( __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : Dict ) -> str: lowercase : Dict =pt_tuple_key[:-1] + ('''scale''',) if ( any('''norm''' in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): lowercase : str =pt_tuple_key[:-1] + ('''scale''',) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: lowercase : str =pt_tuple_key[:-1] + ('''scale''',) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: lowercase : Dict =pt_tuple_key[:-1] + ('''embedding''',) return renamed_pt_tuple_key, pt_tensor # conv layer lowercase : Tuple =pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: lowercase : Tuple =pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer lowercase : str =pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight": lowercase : Optional[Any] =pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight lowercase : Dict =pt_tuple_key[:-1] + ('''weight''',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias lowercase : Union[str, Any] =pt_tuple_key[:-1] + ('''bias''',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any]=42 ) -> List[str]: # Step 1: Convert pytorch tensor to numpy lowercase : Optional[Any] ={k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params lowercase : str =flax_model.init_weights(PRNGKey(__magic_name__ ) ) lowercase : Dict =flatten_dict(__magic_name__ ) lowercase : Dict ={} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowercase : Dict =rename_key(__magic_name__ ) lowercase : Optional[int] =tuple(renamed_pt_key.split('''.''' ) ) # Correctly rename weight parameters lowercase , lowercase : Any =rename_key_and_reshape_tensor(__magic_name__ , __magic_name__ , __magic_name__ ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ''' f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # also add unexpected weight so that warning is thrown lowercase : Tuple =jnp.asarray(__magic_name__ ) return unflatten_dict(__magic_name__ )
92
0
from math import pi, sqrt


def gamma(num: float) -> float:
    """Compute the Gamma function for positive integer and half-integer arguments."""
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    """Basic sanity checks for gamma()."""
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
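A few illustrative spot checks (not in the original snippet) that follow directly from the recursion Gamma(n) = (n - 1) * Gamma(n - 1):

from math import isclose, pi, sqrt

assert gamma(4) == 6.0                      # Gamma(4) = 3! = 6
assert isclose(gamma(1.5), 0.5 * sqrt(pi))  # Gamma(1.5) = 0.5 * Gamma(0.5)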
220
from __future__ import annotations

import random

# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))


def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score an item by counting the characters that are in the right position."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two parent strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child) - 1)] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Select a second parent and generate new children from the pair."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append the new strings to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Evolve random strings until one of them matches the target exactly."""
    # Verify if N_POPULATION is bigger than N_SELECTED.
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside the genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #         max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations,
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population: {total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [(item, score / len(target)) for item, score in population_score]

        # This is selection.
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings
            # in far fewer generations.
            if len(population) > N_POPULATION:
                break


if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
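A small usage sketch, illustrative only: the target string and the reduced gene set below are made up for the example, and debug=False simply silences the progress prints.

# Evolve a short string over a reduced alphabet; returns once the target is matched exactly.
generation, total_population, best = basic(
    "HELLO WORLD", list(" ABCDEFGHIJKLMNOPQRSTUVWXYZ"), debug=False
)
print(generation, total_population, best)  # best == "HELLO WORLD"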
92
0