code
stringlengths
86
54.5k
code_codestyle
int64
0
371
style_context
stringlengths
87
49.2k
style_context_codestyle
int64
0
349
label
int64
0
1
"""simple docstring""" import os import re import warnings from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer if TYPE_CHECKING: from ...tokenization_utils_base import TextInput from ...utils import logging _a : Optional[int] = logging.get_logger(__name__) _a : Any = {'vocab_file': 'spiece.model'} _a : Dict = { 'vocab_file': { 't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model', 't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model', 't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model', } } # TODO(PVP) - this should be removed in Transformers v5 _a : List[str] = { 't5-small': 512, 't5-base': 512, 't5-large': 512, 't5-3b': 512, 't5-11b': 512, } _a : str = '▁' class __A ( SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : Tuple = VOCAB_FILES_NAMES _UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase : Optional[int] = ["input_ids", "attention_mask"] def __init__( self , a__ , a__="</s>" , a__="<unk>" , a__="<pad>" , a__=100 , a__=None , a__ = None , a__=True , **a__ , ): # Add extra_ids to the special token list if extra_ids > 0 and additional_special_tokens is None: _lowerCAmelCase : List[Any] = [F"<extra_id_{i}>" for i in range(a__ )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens _lowerCAmelCase : Union[str, Any] = len(set(filter(lambda a__ : bool("""extra_id""" in str(a__ ) ) , a__ ) ) ) if extra_tokens != extra_ids: raise ValueError( F"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are" """ provided to T5Tokenizer. 
In this case the additional_special_tokens must include the extra_ids""" """ tokens""" ) if legacy: logger.warning_once( F"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to" """ read the related pull request available at https://github.com/huggingface/transformers/pull/24565""" ) _lowerCAmelCase : str = legacy _lowerCAmelCase : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=a__ , unk_token=a__ , pad_token=a__ , extra_ids=a__ , additional_special_tokens=a__ , sp_model_kwargs=self.sp_model_kwargs , legacy=a__ , **a__ , ) _lowerCAmelCase : Any = vocab_file _lowerCAmelCase : Any = extra_ids _lowerCAmelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(a__ ) @staticmethod def __A ( a__ , a__ , a__ ): if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes: _lowerCAmelCase : List[Any] = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( """This tokenizer was incorrectly instantiated with a model max length of""" F" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this" """ behavior is kept to avoid breaking backwards compatibility when padding/encoding with""" """ `truncation is True`.\n- Be aware that you SHOULD NOT rely on""" F" {pretrained_model_name_or_path} automatically truncating your input to" F" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences" F" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with" """ `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please""" """ instantiate this tokenizer with `model_max_length` set to your 
preferred value.""" , a__ , ) return max_model_length @property def __A ( self ): return self.sp_model.get_piece_size() + self._extra_ids def __A ( self ): _lowerCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(a__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __A ( self , a__ , a__ = None , a__ = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=a__ , token_ids_a=a__ , already_has_special_tokens=a__ ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(a__ )) + [1] return ([0] * len(a__ )) + [1] + ([0] * len(a__ )) + [1] def __A ( self ): return list( set(filter(lambda a__ : bool(re.search(r"""<extra_id_\d+>""" , a__ ) ) is not None , self.additional_special_tokens ) ) ) def __A ( self ): return [self._convert_token_to_id(a__ ) for token in self.get_sentinel_tokens()] def __A ( self , a__ ): if len(a__ ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F"This sequence already has {self.eos_token}. 
In future versions this behavior may lead to duplicated" """ eos tokens being added.""" ) return token_ids else: return token_ids + [self.eos_token_id] def __A ( self , a__ , a__ = None ): _lowerCAmelCase : Tuple = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def __A ( self , a__ , a__ = None ): _lowerCAmelCase : Tuple = self._add_eos_if_not_present(a__ ) if token_ids_a is None: return token_ids_a else: _lowerCAmelCase : Optional[Any] = self._add_eos_if_not_present(a__ ) return token_ids_a + token_ids_a def __getstate__( self ): _lowerCAmelCase : Tuple = self.__dict__.copy() _lowerCAmelCase : List[str] = None return state def __setstate__( self , a__ ): _lowerCAmelCase : Optional[int] = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): _lowerCAmelCase : Any = {} _lowerCAmelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __A ( self , a__ , **a__ ): # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at # the beginning of the text if not self.legacy: _lowerCAmelCase : Union[str, Any] = SPIECE_UNDERLINE + text.replace(a__ , """ """ ) return super().tokenize(a__ , **a__ ) def __A ( self , a__ , **a__ ): if not self.legacy: _lowerCAmelCase : int = text.startswith(a__ ) if is_first: _lowerCAmelCase : Tuple = text[1:] _lowerCAmelCase : List[Any] = self.sp_model.encode(a__ , out_type=a__ ) if not self.legacy and not is_first and not text.startswith(""" """ ) and tokens[0].startswith(a__ ): _lowerCAmelCase : Optional[int] = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:] return tokens def __A ( self , a__ ): if token.startswith("""<extra_id_""" ): _lowerCAmelCase : Any = re.match(r"""<extra_id_(\d+)>""" , a__ ) _lowerCAmelCase : Dict = int(match.group(1 ) ) return self.vocab_size - num - 1 return self.sp_model.piece_to_id(a__ ) def __A ( self , a__ 
): if index < self.sp_model.get_piece_size(): _lowerCAmelCase : Union[str, Any] = self.sp_model.IdToPiece(a__ ) else: _lowerCAmelCase : Union[str, Any] = F"<extra_id_{self.vocab_size - 1 - index}>" return token def __A ( self , a__ ): _lowerCAmelCase : Union[str, Any] = [] _lowerCAmelCase : List[str] = """""" _lowerCAmelCase : Tuple = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(a__ ) + token _lowerCAmelCase : Optional[int] = True _lowerCAmelCase : Union[str, Any] = [] else: current_sub_tokens.append(a__ ) _lowerCAmelCase : Tuple = False out_string += self.sp_model.decode(a__ ) return out_string.strip() def __A ( self , a__ , a__ = None ): if not os.path.isdir(a__ ): logger.error(F"Vocabulary path ({save_directory}) should be a directory" ) return _lowerCAmelCase : Optional[Any] = os.path.join( a__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , a__ ) elif not os.path.isfile(self.vocab_file ): with open(a__ , """wb""" ) as fi: _lowerCAmelCase : Optional[Any] = self.sp_model.serialized_model_proto() fi.write(a__ ) return (out_vocab_file,)
44
'''simple docstring''' import argparse import shutil import time from json import JSONDecodeError from logging import getLogger from pathlib import Path from typing import Dict, List import torch from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import ( SeqaSeqDataset, calculate_bleu, calculate_rouge, chunks, lmap, load_json, parse_numeric_n_bool_cl_kwargs, save_json, use_task_specific_params, write_txt_file, ) a : str = getLogger(__name__) def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = 8 , __magic_name__ = 1024 , __magic_name__="val" , __magic_name__=None , __magic_name__=False , __magic_name__="summarization" , __magic_name__=None , __magic_name__=1 , __magic_name__ = None , __magic_name__="" , **__magic_name__ , ): '''simple docstring''' UpperCAmelCase : List[Any] = str(__magic_name__ ) assert local_rank is not None torch.distributed.init_process_group(backend="nccl" , rank=__magic_name__ ) UpperCAmelCase : List[str] = Path(__magic_name__ ) UpperCAmelCase : Dict = save_dir.joinpath(F"rank_{local_rank}_output.json" ) torch.cuda.set_device(__magic_name__ ) UpperCAmelCase : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(__magic_name__ ).cuda() if fpaa: UpperCAmelCase : int = model.half() # determine if we need to increase num_beams use_task_specific_params(__magic_name__ , __magic_name__ ) # update config with task specific params UpperCAmelCase : Dict = generate_kwargs.pop("num_beams" , model.config.num_beams ) # AttributeError risk? if num_return_sequences > num_beams: UpperCAmelCase : Optional[Any] = num_return_sequences UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(__magic_name__ ) logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type. 
if max_source_length is None: UpperCAmelCase : Any = tokenizer.model_max_length if prefix is None: UpperCAmelCase : Tuple = prefix or getattr(model.config , "prefix" , "" ) or "" UpperCAmelCase : Dict = SeqaSeqDataset( __magic_name__ , __magic_name__ , __magic_name__ , max_target_length=1024 , type_path=__magic_name__ , n_obs=__magic_name__ , prefix=__magic_name__ , **__magic_name__ , ) # I set shuffle=True for a more accurate progress bar. # If all the longest samples are first, the prog bar estimate is too high at the beginning. UpperCAmelCase : int = ds.make_sortish_sampler(__magic_name__ , distributed=__magic_name__ , add_extra_examples=__magic_name__ , shuffle=__magic_name__ ) UpperCAmelCase : List[Any] = DataLoader(__magic_name__ , sampler=__magic_name__ , batch_size=__magic_name__ , collate_fn=ds.collate_fn ) UpperCAmelCase : Any = [] for batch in tqdm(__magic_name__ ): UpperCAmelCase : List[Any] = model.generate( input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=__magic_name__ , num_beams=__magic_name__ , **__magic_name__ , ) UpperCAmelCase : Optional[int] = tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ ) UpperCAmelCase : int = batch["ids"] if num_return_sequences > 1: UpperCAmelCase : List[Any] = chunks(__magic_name__ , __magic_name__ ) # batch size chunks, each of size num_return_seq for i, pred in enumerate(__magic_name__ ): results.append({"pred": pred, "id": ids[i].item()} ) save_json(__magic_name__ , __magic_name__ ) return results, sampler.num_replicas def lowercase ( ): '''simple docstring''' UpperCAmelCase : str = argparse.ArgumentParser( epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" ) parser.add_argument("--data_dir" , type=__magic_name__ , help="like cnn_dm/test.source" ) parser.add_argument( "--model_name" , type=__magic_name__ , help="like 
facebook/bart-large-cnn,t5-base, etc." , default="sshleifer/distilbart-xsum-12-3" , ) parser.add_argument("--save_dir" , type=__magic_name__ , help="where to save" , default="tmp_gen" ) parser.add_argument("--max_source_length" , type=__magic_name__ , default=__magic_name__ ) parser.add_argument( "--type_path" , type=__magic_name__ , default="test" , help="which subset to evaluate typically train/val/test" ) parser.add_argument("--task" , type=__magic_name__ , default="summarization" , help="used for task_specific_params + metrics" ) parser.add_argument("--bs" , type=__magic_name__ , default=8 , required=__magic_name__ , help="batch size" ) parser.add_argument( "--local_rank" , type=__magic_name__ , default=-1 , required=__magic_name__ , help="should be passed by distributed.launch" ) parser.add_argument( "--n_obs" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ , help="How many observations. Defaults to all." ) parser.add_argument( "--num_return_sequences" , type=__magic_name__ , default=1 , required=__magic_name__ , help="How many sequences to return" ) parser.add_argument( "--sync_timeout" , type=__magic_name__ , default=600 , required=__magic_name__ , help="How long should master process wait for other processes to finish." 
, ) parser.add_argument("--src_lang" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ ) parser.add_argument("--tgt_lang" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ ) parser.add_argument( "--prefix" , type=__magic_name__ , required=__magic_name__ , default=__magic_name__ , help="will be added to the begininng of src examples" ) parser.add_argument("--fp16" , action="store_true" ) parser.add_argument("--debug" , action="store_true" ) UpperCAmelCase : Union[str, Any] = time.time() UpperCAmelCase , UpperCAmelCase : Dict = parser.parse_known_args() UpperCAmelCase : Tuple = parse_numeric_n_bool_cl_kwargs(__magic_name__ ) if generate_kwargs and args.local_rank <= 0: print(F"parsed the following generate kwargs: {generate_kwargs}" ) UpperCAmelCase : Union[str, Any] = Path(args.save_dir + "_tmp" ) Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ ) # this handles locking. UpperCAmelCase : List[Any] = list(json_save_dir.glob("rank_*.json" ) ) if intermediate_files: raise ValueError(F"Found files at {json_save_dir} please move or remove them." ) # In theory, a node could finish and save before another node hits this. If this happens, we can address later. 
UpperCAmelCase : Optional[Any] = {} if args.src_lang is not None: UpperCAmelCase : List[str] = args.src_lang if args.tgt_lang is not None: UpperCAmelCase : Dict = args.tgt_lang Path(args.save_dir ).mkdir(exist_ok=__magic_name__ ) UpperCAmelCase , UpperCAmelCase : str = eval_data_dir( args.data_dir , __magic_name__ , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=__magic_name__ , **__magic_name__ , ) if args.local_rank <= 0: UpperCAmelCase : List[str] = Path(args.save_dir ) save_dir.mkdir(exist_ok=__magic_name__ ) UpperCAmelCase : str = gather_results_from_each_node(__magic_name__ , __magic_name__ , args.sync_timeout ) UpperCAmelCase : Dict = combine_partial_results(__magic_name__ ) if args.num_return_sequences > 1: UpperCAmelCase : int = save_dir.joinpath("pseudolabel_results.json" ) print(F"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" ) save_json(__magic_name__ , __magic_name__ ) return UpperCAmelCase : Dict = Path(args.data_dir ).joinpath(args.type_path + ".target" ) with open(__magic_name__ ) as f: UpperCAmelCase : Dict = [x.rstrip() for x in f.readlines()][: len(__magic_name__ )] # Calculate metrics, save metrics, and save _generations.txt UpperCAmelCase : Optional[int] = "translation" in args.task UpperCAmelCase : str = calculate_bleu if calc_bleu else calculate_rouge UpperCAmelCase : Tuple = "bleu" if calc_bleu else "rouge" UpperCAmelCase : Dict = score_fn(__magic_name__ , __magic_name__ ) UpperCAmelCase : Any = len(__magic_name__ ) UpperCAmelCase : Union[str, Any] = time.time() - start_time UpperCAmelCase : Dict = round(runtime / metrics["n_obs"] , 4 ) UpperCAmelCase : Optional[Any] = num_replicas # TODO(@stas00): add whatever metadata to metrics UpperCAmelCase : Dict = 
save_dir.joinpath(F"{args.type_path}_{metric_name}.json" ) save_json(__magic_name__ , __magic_name__ , indent=__magic_name__ ) print(__magic_name__ ) write_txt_file(__magic_name__ , save_dir.joinpath(F"{args.type_path}_generations.txt" ) ) if args.debug: write_txt_file(__magic_name__ , save_dir.joinpath(F"{args.type_path}.target" ) ) else: shutil.rmtree(__magic_name__ ) def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Tuple = [] for partial_result in partial_results: records.extend(__magic_name__ ) UpperCAmelCase : Optional[Any] = sorted(__magic_name__ , key=lambda __magic_name__ : x["id"] ) UpperCAmelCase : List[Any] = [x["pred"] for x in records] return preds def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : Dict = time.time() logger.info("waiting for all nodes to finish" ) UpperCAmelCase : Union[str, Any] = None while (time.time() - start_wait) < timeout: UpperCAmelCase : Dict = list(save_dir.glob("rank_*.json" ) ) if len(__magic_name__ ) < num_replicas: continue try: # make sure all json files are fully saved UpperCAmelCase : List[str] = lmap(__magic_name__ , __magic_name__ ) return json_data except JSONDecodeError: continue else: raise TimeoutError("Rank 0 gave up on waiting for other processes" ) # Unreachable if __name__ == "__main__": # Usage for MT: run_generate()
311
0
import re def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" lowercase__ : int = re.compile(R"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$" ) if match := re.search(lowerCamelCase__ , lowerCamelCase__ ): return match.string == phone return False if __name__ == "__main__": print(indian_phone_validator('''+918827897895'''))
360
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase__ = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''NllbTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''NllbTokenizerFast'''] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb import NllbTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb_fast import NllbTokenizerFast else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
121
0
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available UpperCAmelCase : Union[str, Any] = { "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : List[str] = ["VivitImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Optional[int] = [ "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "VivitModel", "VivitPreTrainedModel", "VivitForVideoClassification", ] if TYPE_CHECKING: from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_vivit import VivitImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vivit import ( VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST, VivitForVideoClassification, VivitModel, VivitPreTrainedModel, ) else: import sys UpperCAmelCase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, 
module_spec=__spec__)
252
import argparse import shlex import runhouse as rh if __name__ == "__main__": # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access # setup instructions, if using on-demand hardware # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster # Throw an error if user passes both BYO and on-demand cluster args # Otherwise, use default values UpperCAmelCase : str = argparse.ArgumentParser() parser.add_argument("--user", type=str, default="ubuntu") parser.add_argument("--host", type=str, default="localhost") parser.add_argument("--key_path", type=str, default=None) parser.add_argument("--instance", type=str, default="V100:1") parser.add_argument("--provider", type=str, default="cheapest") parser.add_argument("--use_spot", type=bool, default=False) parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py") UpperCAmelCase, UpperCAmelCase : Optional[Any] = parser.parse_known_args() if args.host != "localhost": if args.instance != "V100:1" or args.provider != "cheapest": raise ValueError("Cannot specify both BYO and on-demand cluster args") UpperCAmelCase : Dict = rh.cluster( name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path} ) else: UpperCAmelCase : str = rh.cluster( name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot ) UpperCAmelCase : str = args.example.rsplit("/", 1)[0] # Set up remote environment cluster.install_packages(["pip:./"]) # Installs transformers from local source # Note transformers is copied into the home directory on the remote machine, so we can install from there cluster.run([f"""pip install -r transformers/examples/{example_dir}/requirements.txt"""]) cluster.run(["pip install torch --upgrade 
--extra-index-url https://download.pytorch.org/whl/cu117"]) # Run example. You can bypass the CLI wrapper and paste your own code here. cluster.run([f"""python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}"""]) # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI): # from my_script... import train # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard'] # launch_train_gpu = rh.function(fn=train, # system=gpu, # reqs=reqs, # name='train_bert_glue') # # We can pass in arguments just like we would to a function: # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16 # stream_logs=True)
252
1
'''simple docstring''' import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html lowerCAmelCase_ : Dict = 'platform' import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class __SCREAMING_SNAKE_CASE : """simple docstring""" __a =PegasusConfig __a ={} __a ='gelu' def __init__( self : Optional[int] , __a : List[str] , __a : Optional[Any]=13 , __a : List[Any]=7 , __a : int=True , __a : List[Any]=False , __a : Optional[int]=99 , __a : Optional[Any]=32 , __a : Optional[Any]=5 , __a : Union[str, Any]=4 , __a : Dict=37 , __a : Optional[Any]=0.1 , __a : List[str]=0.1 , __a : Union[str, Any]=20 , __a : List[Any]=2 , __a : Optional[Any]=1 , __a : List[Any]=0 , ): _a = parent _a = batch_size _a = seq_length _a = is_training _a = use_labels _a = vocab_size _a = hidden_size _a = num_hidden_layers _a = num_attention_heads _a = intermediate_size _a = hidden_dropout_prob _a = attention_probs_dropout_prob _a = max_position_embeddings _a = eos_token_id _a = pad_token_id _a = bos_token_id def UpperCamelCase__ ( self : Optional[Any] ): _a = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) _a = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) _a = np.concatenate([input_ids, eos_tensor] , axis=1 ) _a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _a = self.config_cls( vocab_size=self.vocab_size , 
d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _a = prepare_pegasus_inputs_dict(__a , __a , __a ) return config, inputs_dict def UpperCamelCase__ ( self : Optional[int] , __a : int , __a : str , __a : Tuple ): _a = 20 _a = model_class_name(__a ) _a = model.encode(inputs_dict["input_ids"] ) _a , _a = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) _a = model.init_cache(decoder_input_ids.shape[0] , __a , __a ) _a = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" ) _a = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _a = model.decode( decoder_input_ids[:, :-1] , __a , decoder_attention_mask=__a , past_key_values=__a , decoder_position_ids=__a , ) _a = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" ) _a = model.decode( decoder_input_ids[:, -1:] , __a , decoder_attention_mask=__a , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__a , ) _a = model.decode(__a , __a ) _a = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'Max diff is {diff}' ) def UpperCamelCase__ ( self : List[str] , __a : int , __a : Optional[Any] , __a : Any ): _a = 20 _a = model_class_name(__a ) _a = model.encode(inputs_dict["input_ids"] ) _a , _a = ( inputs_dict["decoder_input_ids"], 
inputs_dict["decoder_attention_mask"], ) _a = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) _a = model.init_cache(decoder_input_ids.shape[0] , __a , __a ) _a = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _a = model.decode( decoder_input_ids[:, :-1] , __a , decoder_attention_mask=__a , past_key_values=__a , decoder_position_ids=__a , ) _a = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" ) _a = model.decode( decoder_input_ids[:, -1:] , __a , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__a , decoder_position_ids=__a , ) _a = model.decode(__a , __a , decoder_attention_mask=__a ) _a = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'Max diff is {diff}' ) def _lowerCamelCase ( lowercase : Optional[Any] , lowercase : Optional[Any] , lowercase : Tuple , lowercase : Tuple=None , lowercase : List[str]=None , ) -> Dict: if attention_mask is None: _a = np.not_equal(lowercase , config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: _a = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , unittest.TestCase ): """simple docstring""" __a =( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) __a =(FlaxPegasusForConditionalGeneration,) if is_flax_available() else () __a =True __a =False __a =False __a =False def UpperCamelCase__ ( self : Tuple ): 
_a = FlaxPegasusModelTester(self ) _a = ConfigTester(self , config_class=__a ) def UpperCamelCase__ ( self : Tuple ): self.config_tester.run_common_tests() def UpperCamelCase__ ( self : Dict ): _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(__a , __a , __a ) def UpperCamelCase__ ( self : int ): _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(__a , __a , __a ) def UpperCamelCase__ ( self : str ): _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _a = self._prepare_for_class(__a , __a ) _a = model_class(__a ) @jax.jit def encode_jitted(__a : Union[str, Any] , __a : Any=None , **__a : Any ): return model.encode(input_ids=__a , attention_mask=__a ) with self.subTest("JIT Enabled" ): _a = encode_jitted(**__a ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): _a = encode_jitted(**__a ).to_tuple() self.assertEqual(len(__a ) , len(__a ) ) for jitted_output, output in zip(__a , __a ): self.assertEqual(jitted_output.shape , output.shape ) def UpperCamelCase__ ( self : Union[str, Any] ): _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _a = model_class(__a ) _a = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] ) _a = { "decoder_input_ids": inputs_dict["decoder_input_ids"], "decoder_attention_mask": inputs_dict["decoder_attention_mask"], "encoder_outputs": encoder_outputs, } @jax.jit def decode_jitted(__a : Union[str, Any] , __a : List[Any] , __a : str ): return model.decode( decoder_input_ids=__a , decoder_attention_mask=__a , encoder_outputs=__a , ) with self.subTest("JIT Enabled" ): _a = decode_jitted(**__a 
).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): _a = decode_jitted(**__a ).to_tuple() self.assertEqual(len(__a ) , len(__a ) ) for jitted_output, output in zip(__a , __a ): self.assertEqual(jitted_output.shape , output.shape ) @slow def UpperCamelCase__ ( self : Optional[int] ): for model_class_name in self.all_model_classes: _a = model_class_name.from_pretrained("google/pegasus-large" , from_pt=__a ) _a = np.ones((1, 1) ) _a = model(__a ) self.assertIsNotNone(__a ) @slow def UpperCamelCase__ ( self : Optional[Any] ): _a = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum" ) _a = PegasusTokenizer.from_pretrained("google/pegasus-xsum" ) _a = [ " PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.", " The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. 
And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ", ] _a = [ "California's largest electricity provider has turned off power to hundreds of thousands of customers.", "Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.", ] _a = tokenizer(__a , return_tensors="np" , truncation=__a , max_length=5_12 , padding=__a ) _a = model.generate(**__a , num_beams=2 ).sequences _a = tokenizer.batch_decode(__a , skip_special_tokens=__a ) assert tgt_text == decoded
352
"""Tokenization class for Speech2Text, backed by SentencePiece + a JSON vocab."""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union

import sentencepiece

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

# Maps a lang-codes family name to the list of languages it covers.
LANGUAGES = {"mustc": MUSTC_LANGS}


class Speech2TextTokenizer(PreTrainedTokenizer):
    """Speech2Text tokenizer: a JSON token->id vocabulary plus a SentencePiece
    model for tokenization, with optional `<lang:xx>` target-language prefixes.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    # Token ids prepended to every encoded sequence (the target-lang code, if any).
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            do_upper_case=do_upper_case,
            do_lower_case=do_lower_case,
            tgt_lang=tgt_lang,
            lang_codes=lang_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}

            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]

            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the prefix tokens to the target language code."""
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Convert a sequence of tokens back into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a 0/1 mask flagging special tokens (1) vs sequence tokens (0)."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        # The SentencePiece processor is not picklable; reload it in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            # Serialized model not backed by a file on disk: dump the proto.
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    """Load a SentencePiece model from `path`."""
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
346
0
"""Sum the decimal digits of num! (Project Euler problem 20 for num=100)."""
from math import factorial


def solution(num: int = 100) -> int:
    """Return the sum of the digits in the decimal expansion of ``num!``.

    >>> solution(10)
    27
    >>> solution()
    648
    """
    # Previously referenced an undefined name instead of the loop variable /
    # parameter, raising NameError on any call.
    return sum(int(digit) for digit in str(factorial(num)))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
162
"""Convert a fairseq SEW checkpoint into the HuggingFace Transformers format."""
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

# Register SEW's fairseq modules
from sew_asapp import tasks  # noqa: F401

from transformers import (
    SEWConfig,
    SEWForCTC,
    SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq parameter prefix -> HF module path ("*" is filled with the layer index).
MAPPING = {
    "post_extract_proj": "feature_projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.upsample.0": "encoder.upsample.projection",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "layer_norm",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy `value` into the attribute of `hf_pointer` addressed by dotted `key`."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Copy every fairseq parameter into the matching HF module, logging leftovers."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load one conv-feature-extractor parameter identified by `full_name`."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def convert_config(model, is_finetuned):
    """Build a SEWConfig from a fairseq model's configuration."""
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config


@torch.no_grad()
def convert_sew_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Copy/paste/tweak a fairseq SEW checkpoint into the Transformers design."""
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_sew_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
    )
256
0
"""Longest common substring of two strings via dynamic programming, O(len1*len2)."""


def longest_common_substring(text1: str, text2: str) -> str:
    """Return the longest contiguous substring shared by ``text1`` and ``text2``.

    Raises:
        ValueError: if either argument is not a ``str``.
    """
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")

    text1_length = len(text1)
    text2_length = len(text2)

    # dp[i][j] = length of the common suffix of text1[:i] and text2[:j]
    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0

    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]

    return text1[ans_index - ans_length : ans_index]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
317
"""Lazy-import structure for the FocalNet model (standard HF __init__ pattern)."""
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Submodule name -> names it exports; consumed by _LazyModule at the bottom.
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only importable when torch is present.
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy; previously `_import_structure` was
    # never defined (NameError) and the proxy was bound to a throwaway name.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
317
1
"""simple docstring""" from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. UpperCamelCase_ = 200 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. UpperCamelCase_ = 50 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. UpperCamelCase_ = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 1000)) def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->tuple[str, float]: """simple docstring""" a_ = len([g for position, g in enumerate(UpperCAmelCase ) if g == main_target[position]] ) return (item, float(UpperCAmelCase )) def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->tuple[str, str]: """simple docstring""" a_ = random.randint(0 , len(UpperCAmelCase ) - 1 ) a_ = parent_a[:random_slice] + parent_a[random_slice:] a_ = parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->str: """simple docstring""" a_ = list(UpperCAmelCase ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: a_ = random.choice(UpperCAmelCase ) return "".join(UpperCAmelCase ) def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) ->list[str]: """simple docstring""" a_ = [] # Generate more children proportionally to the fitness score. a_ = int(parent_a[1] * 100 ) + 1 a_ = 10 if child_n >= 10 else child_n for _ in range(UpperCAmelCase ): a_ = population_score[random.randint(0 , UpperCAmelCase )][0] a_ , a_ = crossover(parent_a[0] , UpperCAmelCase ) # Append new string to the population list. 
pop.append(mutate(UpperCAmelCase , UpperCAmelCase ) ) pop.append(mutate(UpperCAmelCase , UpperCAmelCase ) ) return pop def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = True ) ->tuple[int, int, str]: """simple docstring""" if N_POPULATION < N_SELECTED: a_ = F'''{N_POPULATION} must be bigger than {N_SELECTED}''' raise ValueError(UpperCAmelCase ) # Verify that the target contains no genes besides the ones inside genes variable. a_ = sorted({c for c in target if c not in genes} ) if not_in_genes_list: a_ = F'''{not_in_genes_list} is not in genes list, evolution cannot converge''' raise ValueError(UpperCAmelCase ) # Generate random starting population. a_ = [] for _ in range(UpperCAmelCase ): population.append("".join([random.choice(UpperCAmelCase ) for i in range(len(UpperCAmelCase ) )] ) ) # Just some logs to know what the algorithms is doing. a_ , a_ = 0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(UpperCAmelCase ) # Random population created. Now it's time to evaluate. # Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. a_ = [evaluate(UpperCAmelCase , UpperCAmelCase ) for item in population] # Check if there is a matching evolution. a_ = sorted(UpperCAmelCase , key=lambda UpperCAmelCase : x[1] , reverse=UpperCAmelCase ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. 
# Just to know that the algorithm is working. if debug and generation % 10 == 0: print( F'''\nGeneration: {generation}''' F'''\nTotal Population:{total_population}''' F'''\nBest score: {population_score[0][1]}''' F'''\nBest string: {population_score[0][0]}''' ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. a_ = population[: int(N_POPULATION / 3 )] population.clear() population.extend(UpperCAmelCase ) # Normalize population score to be between 0 and 1. a_ = [ (item, score / len(UpperCAmelCase )) for item, score in population_score ] # This is selection for i in range(UpperCAmelCase ): population.extend(select(population_score[int(UpperCAmelCase )] , UpperCAmelCase , UpperCAmelCase ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. if len(UpperCAmelCase ) > N_POPULATION: break if __name__ == "__main__": UpperCamelCase_ = ( 'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!' ) UpperCamelCase_ = list( ' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm' 'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\' ) UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = basic(target_str, genes_list) print( F"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}""" )
243
"""Recursive DFS topological sort over a hard-coded DAG (dependencies-first order)."""

# Adjacency list: vertex -> outgoing neighbours.
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start, visited, sort):
    """DFS from `start` over the global `edges`/`vertices`.

    Appends each vertex to `sort` after all its neighbours, so every vertex
    appears after the vertices it points to (dependencies-first order).
    `visited` and `sort` are mutated; `sort` is also returned.
    """
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
243
1
"""Tests for the PyTorch DecisionTransformer model."""
import inspect
import unittest

from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import DecisionTransformerModel
    from transformers.models.decision_transformer.modeling_decision_transformer import (
        DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
    )


class DecisionTransformerModelTester:
    """Builds tiny configs and random inputs for DecisionTransformerModel tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))
        config = self.get_config()

        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )

    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )

    def create_and_check_model(
        self,
        config,
        states,
        actions,
        rewards,
        returns_to_go,
        timesteps,
        attention_mask,
    ):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)

        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length *3 as there are 3 modelities: states, returns and actions

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict


@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False

    # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)


@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        """Exercise autoregressive prediction over two steps against fixed expected actions."""
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()

        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )

        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )

            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))

            state, reward, done, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )

            action = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
132
"""simple docstring""" def UpperCAmelCase__ (snake_case__ : int = 10_00 ): """simple docstring""" _snake_case , _snake_case : List[Any] = 1, 1 _snake_case : str = [] for i in range(1 , n + 1 ): _snake_case : Any = prev_numerator + 2 * prev_denominator _snake_case : Optional[Any] = prev_numerator + prev_denominator if len(str(snake_case__ ) ) > len(str(snake_case__ ) ): result.append(snake_case__ ) _snake_case : int = numerator _snake_case : Any = denominator return len(snake_case__ ) if __name__ == "__main__": print(F'''{solution() = }''')
132
1
'''simple docstring''' lowercase : str = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/' def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ): A : Union[str, Any] = F'a bytes-like object is required, not \'{data.__class__.__name__}\'' raise TypeError(snake_case__ ) A : Union[str, Any] = ''''''.join(bin(snake_case__ )[2:].zfill(8 ) for byte in data ) A : List[str] = len(snake_case__ ) % 6 != 0 if padding_needed: # The padding that will be added later A : Union[str, Any] = B'''=''' * ((6 - len(snake_case__ ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(snake_case__ ) % 6) else: A : Optional[Any] = B'''''' # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(snake_case__ ) , 6 ) ).encode() + padding ) def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ) and not isinstance(snake_case__ , snake_case__ ): A : int = ( '''argument should be a bytes-like object or ASCII string, ''' F'not \'{encoded_data.__class__.__name__}\'' ) raise TypeError(snake_case__ ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(snake_case__ , snake_case__ ): try: A : List[str] = encoded_data.decode('''utf-8''' ) except UnicodeDecodeError: raise ValueError('''base64 encoded data should only contain ASCII characters''' ) A : Any = encoded_data.count('''=''' ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." 
# Check the padding assert len(snake_case__ ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one A : int = encoded_data[:-padding] A : List[str] = ''''''.join( bin(B64_CHARSET.index(snake_case__ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: A : int = ''''''.join( bin(B64_CHARSET.index(snake_case__ ) )[2:].zfill(6 ) for char in encoded_data ) A : List[str] = [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(snake_case__ ) , 8 ) ] return bytes(snake_case__ ) if __name__ == "__main__": import doctest doctest.testmod()
3
'''simple docstring''' import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class UpperCamelCase__ : """simple docstring""" @staticmethod def A_ ( *snake_case , **snake_case ): '''simple docstring''' pass @is_pipeline_test @require_vision @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def A_ ( self , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : str = pipeline( "zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" ) UpperCAmelCase : Union[str, Any] = [ { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "candidate_labels": ["cat", "remote", "couch"], } ] return object_detector, examples def A_ ( self , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : List[Any] = object_detector(examples[0] , threshold=0.0 ) UpperCAmelCase : Dict = len(snake_case ) self.assertGreater(snake_case , 0 ) self.assertEqual( snake_case , [ { "score": ANY(snake_case ), "label": ANY(snake_case ), "box": {"xmin": ANY(snake_case ), "ymin": ANY(snake_case ), "xmax": ANY(snake_case ), "ymax": ANY(snake_case )}, } for i in range(snake_case ) ] , ) @require_tf @unittest.skip("Zero Shot Object Detection not implemented in TF" ) def A_ ( self ): '''simple docstring''' pass @require_torch def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = pipeline( "zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" ) UpperCAmelCase : Optional[Any] = object_detector( "./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", 
"couch"] , threshold=0.64 , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ {"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}}, {"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}}, {"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}}, ] , ) UpperCAmelCase : Tuple = object_detector( [ { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "candidate_labels": ["cat", "remote", "couch"], } ] , threshold=0.64 , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ [ {"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}}, {"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}}, {"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, 
"xmax": 5_2_1, "ymax": 1_2_7}}, {"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}}, {"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}}, ] ] , ) @require_torch @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = pipeline("zero-shot-object-detection" ) UpperCAmelCase : Optional[int] = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}}, {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}}, ] , ) UpperCAmelCase : Union[str, Any] = object_detector( [ { "image": "http://images.cocodataset.org/val2017/000000039769.jpg", "candidate_labels": ["cat", "remote", "couch"], }, { "image": "http://images.cocodataset.org/val2017/000000039769.jpg", "candidate_labels": ["cat", "remote", "couch"], }, ] , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}}, {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}}, ], [ {"score": 0.2868, 
"label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}}, {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}}, ], ] , ) @require_tf @unittest.skip("Zero Shot Object Detection not implemented in TF" ) def A_ ( self ): '''simple docstring''' pass @require_torch @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = 0.2 UpperCAmelCase : Union[str, Any] = pipeline("zero-shot-object-detection" ) UpperCAmelCase : str = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=snake_case , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}}, {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}}, ] , ) @require_torch @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = 2 UpperCAmelCase : Optional[Any] = pipeline("zero-shot-object-detection" ) UpperCAmelCase : List[str] = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=snake_case , ) self.assertEqual( nested_simplify(snake_case , decimals=4 ) , [ {"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}}, {"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}}, ] , )
311
0
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_albert import AlbertTokenizer else: _UpperCamelCase = None _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''} _UpperCamelCase = { '''vocab_file''': { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''', }, '''tokenizer_file''': { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''', '''albert-xlarge-v2''': 
'''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''', }, } _UpperCamelCase = { '''albert-base-v1''': 512, '''albert-large-v1''': 512, '''albert-xlarge-v1''': 512, '''albert-xxlarge-v1''': 512, '''albert-base-v2''': 512, '''albert-large-v2''': 512, '''albert-xlarge-v2''': 512, '''albert-xxlarge-v2''': 512, } _UpperCamelCase = '''▁''' class _A ( __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Dict = VOCAB_FILES_NAMES _SCREAMING_SNAKE_CASE : str = PRETRAINED_VOCAB_FILES_MAP _SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _SCREAMING_SNAKE_CASE : List[str] = AlbertTokenizer def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , **__UpperCAmelCase , ) -> Tuple: '''simple docstring''' # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. 
__UpperCAmelCase : Optional[Any] = ( AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase , normalized=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token ) super().__init__( __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , **__UpperCAmelCase , ) __UpperCAmelCase : Any = do_lower_case __UpperCAmelCase : Any = remove_space __UpperCAmelCase : Optional[int] = keep_accents __UpperCAmelCase : List[str] = vocab_file __UpperCAmelCase : Dict = False if not self.vocab_file else True def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: '''simple docstring''' __UpperCAmelCase : List[str] = [self.sep_token_id] __UpperCAmelCase : Tuple = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: '''simple docstring''' __UpperCAmelCase : Optional[int] = [self.sep_token_id] __UpperCAmelCase : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]: '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(__UpperCAmelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __UpperCAmelCase : Union[str, Any] = os.path.join( __UpperCAmelCase , (filename_prefix + """-""" if 
filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ): copyfile(self.vocab_file , __UpperCAmelCase ) return (out_vocab_file,)
16
'''simple docstring''' import warnings from ...utils import logging from .image_processing_layoutlmva import LayoutLMvaImageProcessor _UpperCamelCase = logging.get_logger(__name__) class _A ( __SCREAMING_SNAKE_CASE ): def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> None: '''simple docstring''' warnings.warn( """The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use LayoutLMv2ImageProcessor instead.""" , __UpperCAmelCase , ) super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
16
1
"""simple docstring""" import argparse import os from pathlib import Path import torch from bark.generation import _load_model as _bark_load_model from huggingface_hub import hf_hub_download from transformers import EncodecConfig, EncodecModel, set_seed from transformers.models.bark.configuration_bark import ( BarkCoarseConfig, BarkConfig, BarkFineConfig, BarkSemanticConfig, ) from transformers.models.bark.generation_configuration_bark import ( BarkCoarseGenerationConfig, BarkFineGenerationConfig, BarkGenerationConfig, BarkSemanticGenerationConfig, ) from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel from transformers.utils import logging logging.set_verbosity_info() _lowercase = logging.get_logger(__name__) set_seed(7_70) _lowercase = { '''c_attn''': '''att_proj''', '''c_proj''': '''out_proj''', '''c_fc''': '''in_proj''', '''transformer.''': '''''', '''h.''': '''layers.''', '''ln_1''': '''layernorm_1''', '''ln_2''': '''layernorm_2''', '''ln_f''': '''layernorm_final''', '''wpe''': '''position_embeds_layer''', '''wte''': '''input_embeds_layer''', } _lowercase = { '''text_small''': { '''repo_id''': '''suno/bark''', '''file_name''': '''text.pt''', }, '''coarse_small''': { '''repo_id''': '''suno/bark''', '''file_name''': '''coarse.pt''', }, '''fine_small''': { '''repo_id''': '''suno/bark''', '''file_name''': '''fine.pt''', }, '''text''': { '''repo_id''': '''suno/bark''', '''file_name''': '''text_2.pt''', }, '''coarse''': { '''repo_id''': '''suno/bark''', '''file_name''': '''coarse_2.pt''', }, '''fine''': { '''repo_id''': '''suno/bark''', '''file_name''': '''fine_2.pt''', }, } _lowercase = os.path.dirname(os.path.abspath(__file__)) _lowercase = os.path.join(os.path.expanduser('''~'''), '''.cache''') _lowercase = os.path.join(os.getenv('''XDG_CACHE_HOME''', default_cache_dir), '''suno''', '''bark_v0''') def _snake_case ( snake_case__ : Tuple , snake_case__ : List[str]=False ): A = model_type if use_small: key 
+= "_small" return os.path.join(snake_case__ , REMOTE_MODEL_PATHS[key]['file_name'] ) def _snake_case ( snake_case__ : Tuple , snake_case__ : Tuple ): os.makedirs(snake_case__ , exist_ok=snake_case__ ) hf_hub_download(repo_id=snake_case__ , filename=snake_case__ , local_dir=snake_case__ ) def _snake_case ( snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Any=False , snake_case__ : List[str]="text" ): if model_type == "text": A = BarkSemanticModel A = BarkSemanticConfig A = BarkSemanticGenerationConfig elif model_type == "coarse": A = BarkCoarseModel A = BarkCoarseConfig A = BarkCoarseGenerationConfig elif model_type == "fine": A = BarkFineModel A = BarkFineConfig A = BarkFineGenerationConfig else: raise NotImplementedError() A = F'{model_type}_small' if use_small else model_type A = REMOTE_MODEL_PATHS[model_key] if not os.path.exists(snake_case__ ): logger.info(F'{model_type} model not found, downloading into `{CACHE_DIR}`.' ) _download(model_info['repo_id'] , model_info['file_name'] ) A = torch.load(snake_case__ , map_location=snake_case__ ) # this is a hack A = checkpoint['model_args'] if "input_vocab_size" not in model_args: A = model_args['vocab_size'] A = model_args['vocab_size'] del model_args["vocab_size"] # convert Bark model arguments to HF Bark model arguments A = model_args.pop('n_head' ) A = model_args.pop('n_embd' ) A = model_args.pop('n_layer' ) A = ConfigClass(**checkpoint['model_args'] ) A = ModelClass(config=snake_case__ ) A = GenerationConfigClass() A = model_generation_config A = checkpoint['model'] # fixup checkpoint A = '_orig_mod.' 
for k, v in list(state_dict.items() ): if k.startswith(snake_case__ ): # replace part of the key with corresponding layer name in HF implementation A = k[len(snake_case__ ) :] for old_layer_name in new_layer_name_dict: A = new_k.replace(snake_case__ , new_layer_name_dict[old_layer_name] ) A = state_dict.pop(snake_case__ ) A = set(state_dict.keys() ) - set(model.state_dict().keys() ) A = {k for k in extra_keys if not k.endswith('.attn.bias' )} A = set(model.state_dict().keys() ) - set(state_dict.keys() ) A = {k for k in missing_keys if not k.endswith('.attn.bias' )} if len(snake_case__ ) != 0: raise ValueError(F'extra keys found: {extra_keys}' ) if len(snake_case__ ) != 0: raise ValueError(F'missing keys: {missing_keys}' ) model.load_state_dict(snake_case__ , strict=snake_case__ ) A = model.num_parameters(exclude_embeddings=snake_case__ ) A = checkpoint['best_val_loss'].item() logger.info(F'model loaded: {round(n_params/1e6 , 1 )}M params, {round(snake_case__ , 3 )} loss' ) model.eval() model.to(snake_case__ ) del checkpoint, state_dict return model def _snake_case ( snake_case__ : List[Any] , snake_case__ : Optional[Any]=False , snake_case__ : List[Any]="text" ): if model_type not in ("text", "coarse", "fine"): raise NotImplementedError() A = 'cpu' # do conversion on cpu A = _get_ckpt_path(snake_case__ , use_small=snake_case__ ) A = _load_model(snake_case__ , snake_case__ , model_type=snake_case__ , use_small=snake_case__ ) # load bark initial model A = _bark_load_model(snake_case__ , 'cpu' , model_type=snake_case__ , use_small=snake_case__ ) if model_type == "text": A = bark_model['model'] if model.num_parameters(exclude_embeddings=snake_case__ ) != bark_model.get_num_params(): raise ValueError('initial and new models don\'t have the same number of parameters' ) # check if same output as the bark model A = 5 A = 10 if model_type in ["text", "coarse"]: A = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int ) A = bark_model(snake_case__ )[0] A = 
model(snake_case__ ) # take last logits A = output_new_model_total.logits[:, [-1], :] else: A = 3 A = 8 A = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int ) A = model(snake_case__ , snake_case__ ) A = bark_model(snake_case__ , snake_case__ ) A = output_new_model_total.logits # output difference should come from the difference of self-attention implementation design if output_new_model.shape != output_old_model.shape: raise ValueError('initial and new outputs don\'t have the same shape' ) if (output_new_model - output_old_model).abs().max().item() > 1e-3: raise ValueError('initial and new outputs are not equal' ) Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) model.save_pretrained(snake_case__ ) def _snake_case ( snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : int , snake_case__ : int , snake_case__ : Union[str, Any] , ): A = os.path.join(snake_case__ , snake_case__ ) A = BarkSemanticConfig.from_pretrained(os.path.join(snake_case__ , 'config.json' ) ) A = BarkCoarseConfig.from_pretrained(os.path.join(snake_case__ , 'config.json' ) ) A = BarkFineConfig.from_pretrained(os.path.join(snake_case__ , 'config.json' ) ) A = EncodecConfig.from_pretrained('facebook/encodec_24khz' ) A = BarkSemanticModel.from_pretrained(snake_case__ ) A = BarkCoarseModel.from_pretrained(snake_case__ ) A = BarkFineModel.from_pretrained(snake_case__ ) A = EncodecModel.from_pretrained('facebook/encodec_24khz' ) A = BarkConfig.from_sub_model_configs( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) A = BarkGenerationConfig.from_sub_model_configs( semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config ) A = BarkModel(snake_case__ ) A = semantic A = coarseAcoustic A = fineAcoustic A = codec A = bark_generation_config Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) bark.save_pretrained(snake_case__ , repo_id=snake_case__ , push_to_hub=snake_case__ ) if __name__ == 
"__main__": _lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''') parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''') _lowercase = parser.parse_args() load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
74
"""simple docstring""" import numpy as np from nltk.translate import meteor_score import datasets from datasets.config import importlib_metadata, version lowerCAmelCase_ = version.parse(importlib_metadata.version('nltk')) if NLTK_VERSION >= version.Version('3.6.4'): from nltk import word_tokenize lowerCAmelCase_ = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n' lowerCAmelCase_ = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. 
This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n' lowerCAmelCase_ = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n \'meteor\': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric(\'meteor\')\n >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results["meteor"], 4))\n 0.6944\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class __A ( datasets.Metric ): '''simple docstring''' def UpperCAmelCase ( self : Optional[int] ) -> str: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { '''predictions''': datasets.Value('''string''' ,id='''sequence''' ), '''references''': datasets.Value('''string''' ,id='''sequence''' ), } ) ,codebase_urls=['''https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'''] ,reference_urls=[ '''https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score''', '''https://en.wikipedia.org/wiki/METEOR''', ] ,) def UpperCAmelCase ( self : str ,_snake_case : Dict ) -> Dict: 
"""simple docstring""" import nltk nltk.download('''wordnet''' ) if NLTK_VERSION >= version.Version('''3.6.5''' ): nltk.download('''punkt''' ) if NLTK_VERSION >= version.Version('''3.6.6''' ): nltk.download('''omw-1.4''' ) def UpperCAmelCase ( self : Dict ,_snake_case : Dict ,_snake_case : List[str] ,_snake_case : Tuple=0.9 ,_snake_case : Optional[int]=3 ,_snake_case : Union[str, Any]=0.5 ) -> List[str]: """simple docstring""" if NLTK_VERSION >= version.Version('''3.6.5''' ): lowercase__ : int = [ meteor_score.single_meteor_score( word_tokenize(_snake_case ) ,word_tokenize(_snake_case ) ,alpha=_snake_case ,beta=_snake_case ,gamma=_snake_case ) for ref, pred in zip(_snake_case ,_snake_case ) ] else: lowercase__ : Tuple = [ meteor_score.single_meteor_score(_snake_case ,_snake_case ,alpha=_snake_case ,beta=_snake_case ,gamma=_snake_case ) for ref, pred in zip(_snake_case ,_snake_case ) ] return {"meteor": np.mean(_snake_case )}
16
0
'''simple docstring''' from __future__ import annotations def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> list[tuple[int, int]]: lowerCamelCase__ , lowerCamelCase__ : List[str] = position lowerCamelCase__ : List[str] = [ (y + 1, x + 2), (y - 1, x + 2), (y + 1, x - 2), (y - 1, x - 2), (y + 2, x + 1), (y + 2, x - 1), (y - 2, x + 1), (y - 2, x - 1), ] lowerCamelCase__ : List[str] = [] for position in positions: lowerCamelCase__ , lowerCamelCase__ : List[Any] = position if 0 <= y_test < n and 0 <= x_test < n: permissible_positions.append(UpperCamelCase ) return permissible_positions def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> bool: return not any(elem == 0 for row in board for elem in row ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> bool: if is_complete(UpperCamelCase ): return True for position in get_valid_pos(UpperCamelCase , len(UpperCamelCase ) ): lowerCamelCase__ , lowerCamelCase__ : Tuple = position if board[y][x] == 0: lowerCamelCase__ : Dict = curr + 1 if open_knight_tour_helper(UpperCamelCase , UpperCamelCase , curr + 1 ): return True lowerCamelCase__ : Any = 0 return False def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> list[list[int]]: lowerCamelCase__ : Tuple = [[0 for i in range(UpperCamelCase )] for j in range(UpperCamelCase )] for i in range(UpperCamelCase ): for j in range(UpperCamelCase ): lowerCamelCase__ : Optional[int] = 1 if open_knight_tour_helper(UpperCamelCase , (i, j) , 1 ): return board lowerCamelCase__ : Union[str, Any] = 0 lowerCamelCase__ : List[str] = f'''Open Kight Tour cannot be performed on a board of size {n}''' raise ValueError(UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
129
'''simple docstring''' import argparse import os import torch from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) _A : str ={ '''sample_size''': 32, '''in_channels''': 3, '''out_channels''': 3, '''layers_per_block''': 2, '''num_class_embeds''': 1_000, '''block_out_channels''': [32, 64], '''attention_head_dim''': 8, '''down_block_types''': [ '''ResnetDownsampleBlock2D''', '''AttnDownBlock2D''', ], '''up_block_types''': [ '''AttnUpBlock2D''', '''ResnetUpsampleBlock2D''', ], '''resnet_time_scale_shift''': '''scale_shift''', '''upsample_type''': '''resnet''', '''downsample_type''': '''resnet''', } _A : Union[str, Any] ={ '''sample_size''': 64, '''in_channels''': 3, '''out_channels''': 3, '''layers_per_block''': 3, '''num_class_embeds''': 1_000, '''block_out_channels''': [192, 192 * 2, 192 * 3, 192 * 4], '''attention_head_dim''': 64, '''down_block_types''': [ '''ResnetDownsampleBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', ], '''up_block_types''': [ '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''ResnetUpsampleBlock2D''', ], '''resnet_time_scale_shift''': '''scale_shift''', '''upsample_type''': '''resnet''', '''downsample_type''': '''resnet''', } _A : Dict ={ '''sample_size''': 256, '''in_channels''': 3, '''out_channels''': 3, '''layers_per_block''': 2, '''num_class_embeds''': None, '''block_out_channels''': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4], '''attention_head_dim''': 64, '''down_block_types''': [ '''ResnetDownsampleBlock2D''', '''ResnetDownsampleBlock2D''', '''ResnetDownsampleBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', ], '''up_block_types''': [ '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''ResnetUpsampleBlock2D''', '''ResnetUpsampleBlock2D''', '''ResnetUpsampleBlock2D''', ], '''resnet_time_scale_shift''': '''default''', '''upsample_type''': '''resnet''', '''downsample_type''': '''resnet''', } _A : Dict 
={ '''num_train_timesteps''': 40, '''sigma_min''': 0.002, '''sigma_max''': 80.0, } _A : str ={ '''num_train_timesteps''': 201, '''sigma_min''': 0.002, '''sigma_max''': 80.0, } _A : int ={ '''num_train_timesteps''': 151, '''sigma_min''': 0.002, '''sigma_max''': 80.0, } def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Dict: if isinstance(UpperCamelCase , UpperCamelCase ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise argparse.ArgumentTypeError("""boolean value expected""" ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=False ) -> Any: lowerCamelCase__ : Any = checkpoint[f'''{old_prefix}.in_layers.0.weight'''] lowerCamelCase__ : int = checkpoint[f'''{old_prefix}.in_layers.0.bias'''] lowerCamelCase__ : Any = checkpoint[f'''{old_prefix}.in_layers.2.weight'''] lowerCamelCase__ : Any = checkpoint[f'''{old_prefix}.in_layers.2.bias'''] lowerCamelCase__ : Optional[Any] = checkpoint[f'''{old_prefix}.emb_layers.1.weight'''] lowerCamelCase__ : Optional[int] = checkpoint[f'''{old_prefix}.emb_layers.1.bias'''] lowerCamelCase__ : Dict = checkpoint[f'''{old_prefix}.out_layers.0.weight'''] lowerCamelCase__ : Tuple = checkpoint[f'''{old_prefix}.out_layers.0.bias'''] lowerCamelCase__ : str = checkpoint[f'''{old_prefix}.out_layers.3.weight'''] lowerCamelCase__ : int = checkpoint[f'''{old_prefix}.out_layers.3.bias'''] if has_skip: lowerCamelCase__ : Tuple = checkpoint[f'''{old_prefix}.skip_connection.weight'''] lowerCamelCase__ : List[Any] = checkpoint[f'''{old_prefix}.skip_connection.bias'''] return new_checkpoint def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=None ) -> str: lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict = checkpoint[f'''{old_prefix}.qkv.weight'''].chunk(3 , dim=0 ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = 
checkpoint[f'''{old_prefix}.qkv.bias'''].chunk(3 , dim=0 ) lowerCamelCase__ : Any = checkpoint[f'''{old_prefix}.norm.weight'''] lowerCamelCase__ : Optional[int] = checkpoint[f'''{old_prefix}.norm.bias'''] lowerCamelCase__ : List[Any] = weight_q.squeeze(-1 ).squeeze(-1 ) lowerCamelCase__ : List[Any] = bias_q.squeeze(-1 ).squeeze(-1 ) lowerCamelCase__ : Any = weight_k.squeeze(-1 ).squeeze(-1 ) lowerCamelCase__ : Optional[Any] = bias_k.squeeze(-1 ).squeeze(-1 ) lowerCamelCase__ : Dict = weight_v.squeeze(-1 ).squeeze(-1 ) lowerCamelCase__ : Union[str, Any] = bias_v.squeeze(-1 ).squeeze(-1 ) lowerCamelCase__ : Optional[Any] = ( checkpoint[f'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 ) ) lowerCamelCase__ : Dict = checkpoint[f'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 ) return new_checkpoint def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> List[Any]: lowerCamelCase__ : str = torch.load(UpperCamelCase , map_location="""cpu""" ) lowerCamelCase__ : Optional[int] = {} lowerCamelCase__ : Optional[int] = checkpoint["""time_embed.0.weight"""] lowerCamelCase__ : List[Any] = checkpoint["""time_embed.0.bias"""] lowerCamelCase__ : int = checkpoint["""time_embed.2.weight"""] lowerCamelCase__ : Optional[Any] = checkpoint["""time_embed.2.bias"""] if unet_config["num_class_embeds"] is not None: lowerCamelCase__ : Optional[Any] = checkpoint["""label_emb.weight"""] lowerCamelCase__ : Tuple = checkpoint["""input_blocks.0.0.weight"""] lowerCamelCase__ : List[str] = checkpoint["""input_blocks.0.0.bias"""] lowerCamelCase__ : Optional[Any] = unet_config["""down_block_types"""] lowerCamelCase__ : Any = unet_config["""layers_per_block"""] lowerCamelCase__ : Any = unet_config["""attention_head_dim"""] lowerCamelCase__ : List[Any] = unet_config["""block_out_channels"""] lowerCamelCase__ : str = 1 lowerCamelCase__ : str = channels_list[0] for i, layer_type in enumerate(UpperCamelCase ): lowerCamelCase__ : List[Any] = channels_list[i] lowerCamelCase__ : 
List[Any] = current_channels != prev_channels if layer_type == "ResnetDownsampleBlock2D": for j in range(UpperCamelCase ): lowerCamelCase__ : int = f'''down_blocks.{i}.resnets.{j}''' lowerCamelCase__ : Dict = f'''input_blocks.{current_layer}.0''' lowerCamelCase__ : Tuple = True if j == 0 and downsample_block_has_skip else False lowerCamelCase__ : List[Any] = convert_resnet(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , has_skip=UpperCamelCase ) current_layer += 1 elif layer_type == "AttnDownBlock2D": for j in range(UpperCamelCase ): lowerCamelCase__ : Tuple = f'''down_blocks.{i}.resnets.{j}''' lowerCamelCase__ : Optional[Any] = f'''input_blocks.{current_layer}.0''' lowerCamelCase__ : str = True if j == 0 and downsample_block_has_skip else False lowerCamelCase__ : Union[str, Any] = convert_resnet(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , has_skip=UpperCamelCase ) lowerCamelCase__ : Any = f'''down_blocks.{i}.attentions.{j}''' lowerCamelCase__ : Dict = f'''input_blocks.{current_layer}.1''' lowerCamelCase__ : Tuple = convert_attention( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) current_layer += 1 if i != len(UpperCamelCase ) - 1: lowerCamelCase__ : Tuple = f'''down_blocks.{i}.downsamplers.0''' lowerCamelCase__ : str = f'''input_blocks.{current_layer}.0''' lowerCamelCase__ : Union[str, Any] = convert_resnet(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) current_layer += 1 lowerCamelCase__ : Union[str, Any] = current_channels # hardcoded the mid-block for now lowerCamelCase__ : Any = """mid_block.resnets.0""" lowerCamelCase__ : Optional[Any] = """middle_block.0""" lowerCamelCase__ : int = convert_resnet(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) lowerCamelCase__ : List[Any] = """mid_block.attentions.0""" lowerCamelCase__ : Dict = """middle_block.1""" lowerCamelCase__ : int = convert_attention(UpperCamelCase , UpperCamelCase , 
UpperCamelCase , UpperCamelCase , UpperCamelCase ) lowerCamelCase__ : Any = """mid_block.resnets.1""" lowerCamelCase__ : Tuple = """middle_block.2""" lowerCamelCase__ : int = convert_resnet(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) lowerCamelCase__ : Union[str, Any] = 0 lowerCamelCase__ : Any = unet_config["""up_block_types"""] for i, layer_type in enumerate(UpperCamelCase ): if layer_type == "ResnetUpsampleBlock2D": for j in range(layers_per_block + 1 ): lowerCamelCase__ : int = f'''up_blocks.{i}.resnets.{j}''' lowerCamelCase__ : Optional[Any] = f'''output_blocks.{current_layer}.0''' lowerCamelCase__ : Any = convert_resnet(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , has_skip=UpperCamelCase ) current_layer += 1 if i != len(UpperCamelCase ) - 1: lowerCamelCase__ : Dict = f'''up_blocks.{i}.upsamplers.0''' lowerCamelCase__ : List[str] = f'''output_blocks.{current_layer-1}.1''' lowerCamelCase__ : Optional[Any] = convert_resnet(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) elif layer_type == "AttnUpBlock2D": for j in range(layers_per_block + 1 ): lowerCamelCase__ : str = f'''up_blocks.{i}.resnets.{j}''' lowerCamelCase__ : List[Any] = f'''output_blocks.{current_layer}.0''' lowerCamelCase__ : Optional[Any] = convert_resnet(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , has_skip=UpperCamelCase ) lowerCamelCase__ : Optional[Any] = f'''up_blocks.{i}.attentions.{j}''' lowerCamelCase__ : Any = f'''output_blocks.{current_layer}.1''' lowerCamelCase__ : Optional[int] = convert_attention( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) current_layer += 1 if i != len(UpperCamelCase ) - 1: lowerCamelCase__ : Tuple = f'''up_blocks.{i}.upsamplers.0''' lowerCamelCase__ : Tuple = f'''output_blocks.{current_layer-1}.2''' lowerCamelCase__ : List[str] = convert_resnet(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) lowerCamelCase__ : Dict = 
checkpoint["""out.0.weight"""] lowerCamelCase__ : Dict = checkpoint["""out.0.bias"""] lowerCamelCase__ : Dict = checkpoint["""out.2.weight"""] lowerCamelCase__ : Tuple = checkpoint["""out.2.bias"""] return new_checkpoint if __name__ == "__main__": _A : Tuple =argparse.ArgumentParser() parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''') parser.add_argument( '''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.''' ) parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''') _A : Tuple =parser.parse_args() _A : Optional[int] =strabool(args.class_cond) _A : List[str] =os.path.basename(args.unet_path) print(F'Checkpoint: {ckpt_name}') # Get U-Net config if "imagenet64" in ckpt_name: _A : int =IMAGENET_64_UNET_CONFIG elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): _A : Tuple =LSUN_256_UNET_CONFIG elif "test" in ckpt_name: _A : Any =TEST_UNET_CONFIG else: raise ValueError(F'Checkpoint type {ckpt_name} is not currently supported.') if not args.class_cond: _A : str =None _A : Optional[int] =con_pt_to_diffuser(args.unet_path, unet_config) _A : Optional[int] =UNetaDModel(**unet_config) image_unet.load_state_dict(converted_unet_ckpt) # Get scheduler config if "cd" in ckpt_name or "test" in ckpt_name: _A : Tuple =CD_SCHEDULER_CONFIG elif "ct" in ckpt_name and "imagenet64" in ckpt_name: _A : int =CT_IMAGENET_64_SCHEDULER_CONFIG elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): _A : Union[str, Any] =CT_LSUN_256_SCHEDULER_CONFIG else: raise ValueError(F'Checkpoint type {ckpt_name} is not currently supported.') _A : str =CMStochasticIterativeScheduler(**scheduler_config) _A : Optional[Any] =ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler) consistency_model.save_pretrained(args.dump_path)
129
1
"""Tests for transformers' TF GradientAccumulator / create_optimizer."""
import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_tf


if is_tf_available():
    import tensorflow as tf
    from tensorflow.python.eager import context
    from tensorflow.python.framework import ops

    from transformers import GradientAccumulator, create_optimizer


@require_tf
class __A(unittest.TestCase):
    """Unit tests for gradient accumulation, single-device and mirrored."""

    def assertListAlmostEqual(self, list1, list2, tol):
        # Element-wise approximate comparison of two equal-length sequences.
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def testGradientAccumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        # Feeding a different number of gradients than the first call must fail.
        # NOTE(review): original exception type was obfuscated; ValueError matches
        # the accumulator's contract — confirm against GradientAccumulator.
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        # 1.0 - 2.0 - 1.0 = -2.0 ; 2.0 + 1.0 + 2.0 = 5.0
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)

    def testGradientAccumulatorDistributionStrategy(self):
        # Reset the eager context so two logical CPU devices can be configured.
        # NOTE(review): the assignment target was obfuscated to `_a = None`;
        # `context._context = None` is what `ops.enable_eager_execution_internal()`
        # expects here — confirm against upstream.
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0],
                [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()],
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            # Assign one per-replica value to each logical device, then accumulate.
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)

        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        _check_local_values([2.0, 3.0], [1.0, -2.0])
        apply_grad()
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])
211
'''simple docstring''' import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class __A ( enum.Enum ): '''simple docstring''' __lowerCamelCase : Optional[int] = 0 __lowerCamelCase : Any = 1 __lowerCamelCase : List[Any] = 2 @add_end_docstrings(A ) class __A ( A ): '''simple docstring''' __lowerCamelCase : Dict = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n ' def __init__(self , *A , **A ) -> Tuple: """simple docstring""" super().__init__(*A , **A ) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. 
_a = None if self.model.config.prefix is not None: _a = self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. _a = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. _a , _a , _a = self._sanitize_parameters(prefix=A , **self._forward_params ) _a = {**self._preprocess_params, **preprocess_params} _a = {**self._forward_params, **forward_params} def a__ (self , A=None , A=None , A=None , A=None , A=None , A=None , A=None , A=None , **A , ) -> str: """simple docstring""" _a = {} if prefix is not None: _a = prefix if prefix: _a = self.tokenizer( A , padding=A , add_special_tokens=A , return_tensors=self.framework ) _a = prefix_inputs['''input_ids'''].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( f'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected''' ''' [None, \'hole\']''' ) _a = handle_long_generation preprocess_params.update(A ) _a = generate_kwargs _a = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' ) if return_tensors is not None: raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' ) _a = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' ) _a = ReturnType.TENSORS if return_type is not None: _a = return_type if clean_up_tokenization_spaces is not None: _a = clean_up_tokenization_spaces if stop_sequence is not None: _a = self.tokenizer.encode(A , add_special_tokens=A ) if 
len(A ) > 1: warnings.warn( '''Stopping on a multiple token sequence is not yet supported on transformers. The first token of''' ''' the stop sequence will be used as the stop sequence string in the interim.''' ) _a = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def a__ (self , *A , **A ) -> List[Any]: """simple docstring""" if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({'''add_space_before_punct_symbol''': True} ) return super()._parse_and_tokenize(*A , **A ) def __call__(self , A , **A ) -> int: """simple docstring""" return super().__call__(A , **A ) def a__ (self , A , A="" , A=None , **A ) -> Any: """simple docstring""" _a = self.tokenizer( prefix + prompt_text , padding=A , add_special_tokens=A , return_tensors=self.framework ) _a = prompt_text if handle_long_generation == "hole": _a = inputs['''input_ids'''].shape[-1] if "max_new_tokens" in generate_kwargs: _a = generate_kwargs['''max_new_tokens'''] else: _a = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len if new_tokens < 0: raise ValueError('''We cannot infer how many new tokens are expected''' ) if cur_len + new_tokens > self.tokenizer.model_max_length: _a = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( '''We cannot use `hole` to handle this generation the number of desired tokens exceeds the''' ''' models max length''' ) _a = inputs['''input_ids'''][:, -keep_length:] if "attention_mask" in inputs: _a = inputs['''attention_mask'''][:, -keep_length:] return inputs def a__ (self , A , **A ) -> Any: """simple docstring""" _a = model_inputs['''input_ids'''] _a = model_inputs.get('''attention_mask''' , A ) # Allow empty prompts if input_ids.shape[1] == 0: _a = None _a = None _a = 1 else: _a = input_ids.shape[0] _a = model_inputs.pop('''prompt_text''' ) # If there is a prefix, we may need to adjust the generation length. 
Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. _a = generate_kwargs.pop('''prefix_length''' , 0 ) if prefix_length > 0: _a = '''max_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].max_new_tokens is not None ) if not has_max_new_tokens: _a = generate_kwargs.get('''max_length''' ) or self.model.config.max_length generate_kwargs["max_length"] += prefix_length _a = '''min_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL _a = self.model.generate(input_ids=A , attention_mask=A , **A ) _a = generated_sequence.shape[0] if self.framework == "pt": _a = generated_sequence.reshape(A , out_b // in_b , *generated_sequence.shape[1:] ) elif self.framework == "tf": _a = tf.reshape(A , (in_b, out_b // in_b, *generated_sequence.shape[1:]) ) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def a__ (self , A , A=ReturnType.FULL_TEXT , A=True ) -> str: """simple docstring""" _a = model_outputs['''generated_sequence'''][0] _a = model_outputs['''input_ids'''] _a = model_outputs['''prompt_text'''] _a = generated_sequence.numpy().tolist() _a = [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: _a = {'''generated_token_ids''': sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text _a = self.tokenizer.decode( A , skip_special_tokens=A , clean_up_tokenization_spaces=A , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: _a = 0 else: _a = len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=A , clean_up_tokenization_spaces=A , ) ) if 
return_type == ReturnType.FULL_TEXT: _a = prompt_text + text[prompt_length:] else: _a = text[prompt_length:] _a = {'''generated_text''': all_text} records.append(A ) return records
211
1
"""Morse code encoder/decoder."""
# fmt: off
# Exclamation mark is not in the ITU-R recommendation.
MORSE_CODE_DICT: dict[str, str] = {
    'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.',
    'G': '--.', 'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..',
    'M': '--', 'N': '-.', 'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.',
    'S': '...', 'T': '-', 'U': '..-', 'V': '...-', 'W': '.--', 'X': '-..-',
    'Y': '-.--', 'Z': '--..',
    '1': '.----', '2': '..---', '3': '...--', '4': '....-', '5': '.....',
    '6': '-....', '7': '--...', '8': '---..', '9': '----.', '0': '-----',
    '&': '.-...', '@': '.--.-.', ':': '---...', ',': '--..--', '.': '.-.-.-',
    '\'': '.----.', '"': '.-..-.', '?': '..--..', '/': '-..-.', '=': '-...-',
    '+': '.-.-.', '-': '-....-', '(': '-.--.', ')': '-.--.-', '!': '-.-.--',
    ' ': '/',
}
# fmt: on

# Inverse mapping: Morse symbol -> character ('/' decodes back to a space).
REVERSE_DICT: dict[str, str] = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    """Encode *message* as space-separated Morse symbols (case-insensitive)."""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    """Decode space-separated Morse symbols back into uppercase text."""
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    """Round-trip a demo message through encrypt/decrypt, printing each step."""
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
355
import sacrebleu as scb from packaging import version from sacrebleu import TER import datasets SCREAMING_SNAKE_CASE :Optional[int] = """\ @inproceedings{snover-etal-2006-study, title = \"A Study of Translation Edit Rate with Targeted Human Annotation\", author = \"Snover, Matthew and Dorr, Bonnie and Schwartz, Rich and Micciulla, Linnea and Makhoul, John\", booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\", month = aug # \" 8-12\", year = \"2006\", address = \"Cambridge, Massachusetts, USA\", publisher = \"Association for Machine Translation in the Americas\", url = \"https://aclanthology.org/2006.amta-papers.25\", pages = \"223--231\", } @inproceedings{post-2018-call, title = \"A Call for Clarity in Reporting {BLEU} Scores\", author = \"Post, Matt\", booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\", month = oct, year = \"2018\", address = \"Belgium, Brussels\", publisher = \"Association for Computational Linguistics\", url = \"https://www.aclweb.org/anthology/W18-6319\", pages = \"186--191\", } """ SCREAMING_SNAKE_CASE :Optional[int] = """\ TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu (https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found here: https://github.com/jhclark/tercom. The implementation here is slightly different from sacrebleu in terms of the required input format. The length of the references and hypotheses lists need to be the same, so you may need to transpose your references compared to sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534 See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information. 
""" SCREAMING_SNAKE_CASE :Tuple = """ Produces TER scores alongside the number of edits and reference length. Args: predictions (list of str): The system stream (a sequence of segments). references (list of list of str): A list of one or more reference streams (each a sequence of segments). normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`. ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`. support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters, as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana. Only applies if `normalized = True`. Defaults to `False`. case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`. Returns: 'score' (float): TER score (num_edits / sum_ref_lengths * 100) 'num_edits' (int): The cumulative number of edits 'ref_length' (float): The cumulative average reference length Examples: Example 1: >>> predictions = [\"does this sentence match??\", ... \"what about this sentence?\", ... \"What did the TER metric user say to the developer?\"] >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"], ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"], ... [\"Your jokes are...\", \"...TERrible\"]] >>> ter = datasets.load_metric(\"ter\") >>> results = ter.compute(predictions=predictions, ... references=references, ... case_sensitive=True) >>> print(results) {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0} Example 2: >>> predictions = [\"does this sentence match??\", ... \"what about this sentence?\"] >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"], ... 
[\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]] >>> ter = datasets.load_metric(\"ter\") >>> results = ter.compute(predictions=predictions, ... references=references, ... case_sensitive=True) >>> print(results) {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0} Example 3: >>> predictions = [\"does this sentence match??\", ... \"what about this sentence?\"] >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"], ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]] >>> ter = datasets.load_metric(\"ter\") >>> results = ter.compute(predictions=predictions, ... references=references, ... normalized=True, ... case_sensitive=True) >>> print(results) {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5} Example 4: >>> predictions = [\"does this sentence match??\", ... \"what about this sentence?\"] >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"], ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]] >>> ter = datasets.load_metric(\"ter\") >>> results = ter.compute(predictions=predictions, ... references=references, ... ignore_punct=True, ... case_sensitive=False) >>> print(results) {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0} Example 5: >>> predictions = [\"does this sentence match??\", ... \"what about this sentence?\", ... \"What did the TER metric user say to the developer?\"] >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"], ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"], ... [\"Your jokes are...\", \"...TERrible\"]] >>> ter = datasets.load_metric(\"ter\") >>> results = ter.compute(predictions=predictions, ... references=references, ... ignore_punct=True, ... 
case_sensitive=False) >>> print(results) {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __magic_name__ ( datasets.Metric ): def UpperCAmelCase_ ( self )-> Tuple: if version.parse(scb.__version__ ) < version.parse("1.4.12" ): raise ImportWarning( "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n" "You can install it with `pip install \"sacrebleu>=1.4.12\"`." ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ), } ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[ "https://github.com/jhclark/tercom", ] , ) def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase = False , _lowercase = False , _lowercase = False , _lowercase = False , )-> int: UpperCamelCase_ = len(references[0] ) if any(len(_lowercase ) != references_per_prediction for refs in references ): raise ValueError("Sacrebleu requires the same number of references for each prediction" ) UpperCamelCase_ = [[refs[i] for refs in references] for i in range(_lowercase )] UpperCamelCase_ = TER( normalized=_lowercase , no_punct=_lowercase , asian_support=_lowercase , case_sensitive=_lowercase , ) UpperCamelCase_ = sb_ter.corpus_score(_lowercase , _lowercase ) return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
60
0
"""TPU smoke test: launch the accelerate test script through xla_spawn."""
import inspect
import os
import sys
import unittest

import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu


class UpperCAmelCase_(unittest.TestCase):
    """Launches accelerate's test_script.py on 8 TPU cores via xla_spawn."""

    def setUp(self):
        # Locate the bundled test script relative to accelerate.test_utils,
        # and the directory of this test file (which contains xla_spawn.py).
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"]
        )
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        # Build the launcher command; .split() on the multi-line f-string
        # yields [xla_spawn.py, --num_cores, 8, test_script.py].
        distributed_args = f"\n    {self.test_dir}/xla_spawn.py\n    --num_cores 8\n    {self.test_file_path}\n    ".split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
35
'''simple docstring'''

# NOTE(review): every constant in this module is bound to the SAME name `__a`,
# so each assignment below overwrites the previous one and only the final
# frozenset(["input_tokens"]) survives at import time.  These look like
# per-pipeline parameter sets (text-to-image, image variation, inpainting,
# audio, class-conditional, unconditional) whose distinct identifiers were
# lost -- TODO: recover the intended names before anything imports from here.

# Presumably: text-to-image call parameters -- confirm.
__a = frozenset(
    [
        "prompt",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)
# Presumably the batch-sized subset of the above.
__a = frozenset(["prompt", "negative_prompt"])
# Unconditional variant: no call parameters.
__a = frozenset([])
__a = frozenset(["image"])
# Image-variation parameters.
__a = frozenset(
    [
        "image",
        "height",
        "width",
        "guidance_scale",
    ]
)
__a = frozenset(["image"])
# Text-guided image-to-image parameters.
__a = frozenset(
    [
        "prompt",
        "image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)
__a = frozenset(["prompt", "image", "negative_prompt"])
__a = frozenset(
    [
        # Text guided image variation with an image mask
        "prompt",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)
__a = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
__a = frozenset(
    [
        # image variation with an image mask
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)
__a = frozenset(["image", "mask_image"])
# Example-guided inpainting parameters.
__a = frozenset(
    [
        "example_image",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)
__a = frozenset(["example_image", "image", "mask_image"])
# Class-conditional generation.
__a = frozenset(["class_labels"])
__a = frozenset(["class_labels"])
# Unconditional generation keyed only on batch size.
__a = frozenset(["batch_size"])
__a = frozenset([])
__a = frozenset(["batch_size"])
__a = frozenset([])
# Text-to-audio parameters.
__a = frozenset(
    [
        "prompt",
        "audio_length_in_s",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)
__a = frozenset(["prompt", "negative_prompt"])
# Token-to-token generation.
__a = frozenset(["input_tokens"])
__a = frozenset(["input_tokens"])
35
1
"""simple docstring""" # This code is adapted from OpenAI's release # https://github.com/openai/human-eval/blob/master/human_eval/execution.py import contextlib import faulthandler import io import multiprocessing import os import platform import signal import tempfile def __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ): """simple docstring""" A = multiprocessing.Manager() A = manager.list() A = multiprocessing.Process(target=lowercase__ , args=(check_program, result, timeout) ) p.start() p.join(timeout=timeout + 1 ) if p.is_alive(): p.kill() if not result: result.append("timed out" ) return { "task_id": task_id, "passed": result[0] == "passed", "result": result[0], "completion_id": completion_id, } def __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , lowercase__ ): """simple docstring""" with create_tempdir(): # These system calls are needed when cleaning up tempdir. import os import shutil A = shutil.rmtree A = os.rmdir A = os.chdir # Disable functionalities that can make destructive changes to the test. reliability_guard() # Run program. try: A = {} with swallow_io(): with time_limit(lowercase__ ): exec(lowercase__ , lowercase__ ) result.append("passed" ) except TimeoutException: result.append("timed out" ) except BaseException as e: result.append(F"""failed: {e}""" ) # Needed for cleaning up. A = rmtree A = rmdir A = chdir @contextlib.contextmanager def __SCREAMING_SNAKE_CASE ( lowercase__ ): """simple docstring""" def signal_handler(lowercase__ , lowercase__ ): raise TimeoutException("Timed out!" 
) signal.setitimer(signal.ITIMER_REAL , lowercase__ ) signal.signal(signal.SIGALRM , lowercase__ ) try: yield finally: signal.setitimer(signal.ITIMER_REAL , 0 ) @contextlib.contextmanager def __SCREAMING_SNAKE_CASE ( ): """simple docstring""" A = WriteOnlyStringIO() with contextlib.redirect_stdout(lowercase__ ): with contextlib.redirect_stderr(lowercase__ ): with redirect_stdin(lowercase__ ): yield @contextlib.contextmanager def __SCREAMING_SNAKE_CASE ( ): """simple docstring""" with tempfile.TemporaryDirectory() as dirname: with chdir(lowercase__ ): yield dirname class __UpperCamelCase ( _A ): pass class __UpperCamelCase ( io.StringIO ): def SCREAMING_SNAKE_CASE__ (self : List[Any] , *__SCREAMING_SNAKE_CASE : Tuple , **__SCREAMING_SNAKE_CASE : List[Any]): raise OSError def SCREAMING_SNAKE_CASE__ (self : Union[str, Any] , *__SCREAMING_SNAKE_CASE : int , **__SCREAMING_SNAKE_CASE : Tuple): raise OSError def SCREAMING_SNAKE_CASE__ (self : Dict , *__SCREAMING_SNAKE_CASE : Any , **__SCREAMING_SNAKE_CASE : Union[str, Any]): raise OSError def SCREAMING_SNAKE_CASE__ (self : Any , *__SCREAMING_SNAKE_CASE : int , **__SCREAMING_SNAKE_CASE : List[Any]): return False class __UpperCamelCase ( contextlib._RedirectStream ): # type: ignore SCREAMING_SNAKE_CASE = "stdin" @contextlib.contextmanager def __SCREAMING_SNAKE_CASE ( lowercase__ ): """simple docstring""" if root == ".": yield return A = os.getcwd() os.chdir(lowercase__ ) try: yield except BaseException as exc: raise exc finally: os.chdir(lowercase__ ) def __SCREAMING_SNAKE_CASE ( lowercase__=None ): """simple docstring""" if maximum_memory_bytes is not None: import resource resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) ) resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) ) if not platform.uname().system == "Darwin": resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) ) faulthandler.disable() import builtins A = 
None A = None import os A = "1" A = None A = None A = None A = None A = None A = None A = None A = None A = None A = None A = None A = None A = None A = None A = None A = None A = None A = None A = None A = None A = None A = None A = None A = None A = None A = None A = None import shutil A = None A = None A = None import subprocess A = None # type: ignore A = None import sys A = None A = None A = None A = None A = None
57
"""simple docstring""" class __UpperCamelCase ( _A ): pass class __UpperCamelCase ( _A ): pass class __UpperCamelCase : def __init__(self : Tuple): A = [ [], [], [], ] def SCREAMING_SNAKE_CASE__ (self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int): try: if len(self.queues[priority]) >= 1_0_0: raise OverflowError("Maximum queue size is 100") self.queues[priority].append(__SCREAMING_SNAKE_CASE) except IndexError: raise ValueError("Valid priorities are 0, 1, and 2") def SCREAMING_SNAKE_CASE__ (self : List[str]): for queue in self.queues: if queue: return queue.pop(0) raise UnderFlowError("All queues are empty") def __str__(self : Any): return "\n".join(F"""Priority {i}: {q}""" for i, q in enumerate(self.queues)) class __UpperCamelCase : def __init__(self : Optional[Any]): A = [] def SCREAMING_SNAKE_CASE__ (self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int): if len(self.queue) == 1_0_0: raise OverFlowError("Maximum queue size is 100") self.queue.append(__SCREAMING_SNAKE_CASE) def SCREAMING_SNAKE_CASE__ (self : Dict): if not self.queue: raise UnderFlowError("The queue is empty") else: A = min(self.queue) self.queue.remove(__SCREAMING_SNAKE_CASE) return data def __str__(self : List[str]): return str(self.queue) def __SCREAMING_SNAKE_CASE ( ): """simple docstring""" A = FixedPriorityQueue() fpq.enqueue(0 , 10 ) fpq.enqueue(1 , 70 ) fpq.enqueue(0 , 100 ) fpq.enqueue(2 , 1 ) fpq.enqueue(2 , 5 ) fpq.enqueue(1 , 7 ) fpq.enqueue(2 , 4 ) fpq.enqueue(1 , 64 ) fpq.enqueue(0 , 128 ) print(lowercase__ ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(lowercase__ ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) def __SCREAMING_SNAKE_CASE ( ): """simple docstring""" A = ElementPriorityQueue() epq.enqueue(10 ) epq.enqueue(70 ) epq.enqueue(100 ) epq.enqueue(1 ) epq.enqueue(5 ) epq.enqueue(7 ) epq.enqueue(4 ) 
epq.enqueue(64 ) epq.enqueue(128 ) print(lowercase__ ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(lowercase__ ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) if __name__ == "__main__": fixed_priority_queue() element_priority_queue()
57
1
# Doc-builder notebook configuration (Italian docs translation).
# NOTE(review): in the obfuscated original all three constants were bound to
# `_lowercase`, and the second assignment referenced an undefined
# INSTALL_CONTENT; canonical names restored so the references resolve.

# First code cell injected into generated notebooks (install instructions, in Italian).
INSTALL_CONTENT = """
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
# Placeholder substitutions applied before running black on doc snippets.
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
170
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# NOTE(review): in the obfuscated original the logger and this map were both
# bound to `_lowercase` (the map clobbered the logger) and the class derived
# from an undefined `A__`; canonical transformers names restored.
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"
}


class OpenAIGPTConfig(PretrainedConfig):
    """Configuration class for the original OpenAI GPT model.

    Defaults mirror the `openai-gpt` checkpoint; all arguments are forwarded
    as attributes, with the extra **kwargs handled by PretrainedConfig.
    """

    model_type = "openai-gpt"
    # Map the generic config attribute names onto GPT's historical ones.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn  # activation function name
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        # Sequence-summary (classification head) settings.
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
170
1
"""simple docstring""" import os def _snake_case ( ): with open(os.path.dirname(UpperCamelCase ) + """/p022_names.txt""" ) as file: UpperCAmelCase : str = str(file.readlines()[0] ) UpperCAmelCase : Optional[int] = names.replace("""\"""" , """""" ).split(""",""" ) names.sort() UpperCAmelCase : Any = 0 UpperCAmelCase : List[Any] = 0 for i, name in enumerate(UpperCamelCase ): for letter in name: name_score += ord(UpperCamelCase ) - 64 total_score += (i + 1) * name_score UpperCAmelCase : str = 0 return total_score if __name__ == "__main__": print(solution())
350
"""simple docstring""" from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ): @slow @require_torch def SCREAMING_SNAKE_CASE ( self ) -> Tuple: '''simple docstring''' UpperCAmelCase : Any = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" ) UpperCAmelCase : Optional[int] = BertTokenizer.from_pretrained("""bert-base-uncased""" ) UpperCAmelCase : Tuple = bertabert.config.encoder.vocab_size UpperCAmelCase : int = tokenizer.sep_token_id UpperCAmelCase : Dict = tokenizer.cls_token_id UpperCAmelCase : int = 128 UpperCAmelCase : List[str] = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" ) UpperCAmelCase : Union[str, Any] = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" ) UpperCAmelCase : Optional[int] = train_dataset.select(range(32 ) ) UpperCAmelCase : int = val_dataset.select(range(16 ) ) UpperCAmelCase : List[str] = 4 def _map_to_encoder_decoder_inputs(_SCREAMING_SNAKE_CASE ): # Tokenizer will automatically set [BOS] <text> [EOS] UpperCAmelCase : str = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=_SCREAMING_SNAKE_CASE , max_length=512 ) UpperCAmelCase : str = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=_SCREAMING_SNAKE_CASE , max_length=128 ) UpperCAmelCase : Optional[Any] = inputs.input_ids UpperCAmelCase : Union[str, Any] = inputs.attention_mask UpperCAmelCase : Union[str, Any] = outputs.input_ids UpperCAmelCase : Any = outputs.input_ids.copy() UpperCAmelCase : Tuple = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""] ] UpperCAmelCase : List[Any] = 
outputs.attention_mask assert all(len(_SCREAMING_SNAKE_CASE ) == 512 for x in inputs.input_ids ) assert all(len(_SCREAMING_SNAKE_CASE ) == 128 for x in outputs.input_ids ) return batch def _compute_metrics(_SCREAMING_SNAKE_CASE ): UpperCAmelCase : Optional[Any] = pred.label_ids UpperCAmelCase : Tuple = pred.predictions # all unnecessary tokens are removed UpperCAmelCase : Union[str, Any] = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE ) UpperCAmelCase : str = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Optional[Any] = sum([int(pred_str[i] == label_str[i] ) for i in range(len(_SCREAMING_SNAKE_CASE ) )] ) / len(_SCREAMING_SNAKE_CASE ) return {"accuracy": accuracy} # map train dataset UpperCAmelCase : List[Any] = train_dataset.map( _map_to_encoder_decoder_inputs , batched=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , remove_columns=["""article""", """highlights"""] , ) train_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) # same for validation dataset UpperCAmelCase : List[str] = val_dataset.map( _map_to_encoder_decoder_inputs , batched=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , remove_columns=["""article""", """highlights"""] , ) val_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) UpperCAmelCase : Dict = self.get_auto_remove_tmp_dir() UpperCAmelCase : Dict = SeqaSeqTrainingArguments( output_dir=_SCREAMING_SNAKE_CASE , per_device_train_batch_size=_SCREAMING_SNAKE_CASE , per_device_eval_batch_size=_SCREAMING_SNAKE_CASE , predict_with_generate=_SCREAMING_SNAKE_CASE , evaluation_strategy="""steps""" , do_train=_SCREAMING_SNAKE_CASE , do_eval=_SCREAMING_SNAKE_CASE , warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # 
instantiate trainer UpperCAmelCase : List[str] = SeqaSeqTrainer( model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , compute_metrics=_compute_metrics , train_dataset=_SCREAMING_SNAKE_CASE , eval_dataset=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , ) # start training trainer.train()
76
0
import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def _a(
    *args,
    take_from: Optional[Union[Dict, Any]] = None,
    standard_warn: bool = True,
    stacklevel: int = 2,
) -> Any:
    """Emit a deprecation FutureWarning for one or more (attribute, version, message)
    tuples, optionally popping deprecated values from a kwargs dict or object.

    NOTE(review): the obfuscated original declared all four parameters with the
    SAME name (a syntax error) and used one placeholder for five different
    locals; parameter/local names restored from the diffusers `deprecate` utility.

    Returns the popped/fetched value(s): nothing, a single value, or a tuple.
    Raises ValueError if the deprecation window has already passed, and
    TypeError if unexpected keys remain in the kwargs dict.
    """
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    # Allow a single bare tuple instead of *args of tuples.
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        # Compare against the base version so ".dev0" suffixes don't mask expiry.
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    # Anything left in the dict was not a known deprecated argument -> TypeError
    # pointing at the caller's location.
    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        # NOTE(review): the original message had "(unknown)" where the filename
        # belongs -- an artifact of the dump; {filename} restored.
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
322
import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipaConfig, BlipaForConditionalGeneration, BlipaProcessor, BlipaVisionConfig, BlipImageProcessor, OPTConfig, TaConfig, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def lowerCAmelCase_ ( ) -> str: '''simple docstring''' __magic_name__ : int = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png" __magic_name__ : Union[str, Any] = Image.open(requests.get(_snake_case , stream=_snake_case ).raw ).convert("RGB" ) return image def lowerCAmelCase_ ( _snake_case : str ) -> Union[str, Any]: '''simple docstring''' __magic_name__ : List[str] = [] # fmt: off # vision encoder rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") ) rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") ) rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") ) rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") ) rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") ) rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') ) 
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') ) # QFormer rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight") ) rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias") ) # fmt: on return rename_keys def lowerCAmelCase_ ( _snake_case : Any , _snake_case : Union[str, Any] , _snake_case : Optional[Any] ) -> int: '''simple docstring''' __magic_name__ : Tuple = dct.pop(_snake_case ) __magic_name__ : int = val def lowerCAmelCase_ ( _snake_case : List[str] , _snake_case : Optional[Any] ) -> Dict: '''simple docstring''' for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases __magic_name__ : List[Any] = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' ) __magic_name__ : Optional[Any] = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' ) # next, set bias in the state dict __magic_name__ : Optional[int] = torch.cat((q_bias, torch.zeros_like(_snake_case , requires_grad=_snake_case ), 
v_bias) ) __magic_name__ : Union[str, Any] = qkv_bias def lowerCAmelCase_ ( _snake_case : Dict , _snake_case : str ) -> int: '''simple docstring''' __magic_name__ : List[Any] = 364 if "coco" in model_name else 224 __magic_name__ : Union[str, Any] = BlipaVisionConfig(image_size=_snake_case ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "opt-2.7b" in model_name: __magic_name__ : List[str] = OPTConfig.from_pretrained("facebook/opt-2.7b" , eos_token_id=_snake_case ).to_dict() elif "opt-6.7b" in model_name: __magic_name__ : Any = OPTConfig.from_pretrained("facebook/opt-6.7b" , eos_token_id=_snake_case ).to_dict() elif "t5-xl" in model_name: __magic_name__ : Dict = TaConfig.from_pretrained("google/flan-t5-xl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: __magic_name__ : int = TaConfig.from_pretrained("google/flan-t5-xxl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict() __magic_name__ : List[Any] = BlipaConfig(vision_config=_snake_case , text_config=_snake_case ) return config, image_size @torch.no_grad() def lowerCAmelCase_ ( _snake_case : List[str] , _snake_case : str=None , _snake_case : Dict=False ) -> List[Any]: '''simple docstring''' __magic_name__ : Optional[int] = ( AutoTokenizer.from_pretrained("facebook/opt-2.7b" ) if "opt" in model_name else AutoTokenizer.from_pretrained("google/flan-t5-xl" ) ) __magic_name__ : List[Any] = tokenizer("\n" , add_special_tokens=_snake_case ).input_ids[0] __magic_name__ , __magic_name__ : Tuple = get_blipa_config(_snake_case , eos_token_id=_snake_case ) __magic_name__ : Union[str, Any] = BlipaForConditionalGeneration(_snake_case ).eval() __magic_name__ : Any = { "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"), "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"), "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"), "blip2-opt-6.7b-coco": ("blip2_opt", 
"caption_coco_opt6.7b"), "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"), "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"), "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"), } __magic_name__ , __magic_name__ : Union[str, Any] = model_name_to_original[model_name] # load original model print("Loading original model..." ) __magic_name__ : Union[str, Any] = "cuda" if torch.cuda.is_available() else "cpu" __magic_name__ , __magic_name__ , __magic_name__ : Optional[Any] = load_model_and_preprocess( name=_snake_case , model_type=_snake_case , is_eval=_snake_case , device=_snake_case ) original_model.eval() print("Done!" ) # update state dict keys __magic_name__ : Dict = original_model.state_dict() __magic_name__ : str = create_rename_keys(_snake_case ) for src, dest in rename_keys: rename_key(_snake_case , _snake_case , _snake_case ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): __magic_name__ : Any = state_dict.pop(_snake_case ) if key.startswith("Qformer.bert" ): __magic_name__ : Optional[int] = key.replace("Qformer.bert" , "qformer" ) if "attention.self" in key: __magic_name__ : Any = key.replace("self" , "attention" ) if "opt_proj" in key: __magic_name__ : Union[str, Any] = key.replace("opt_proj" , "language_projection" ) if "t5_proj" in key: __magic_name__ : Optional[int] = key.replace("t5_proj" , "language_projection" ) if key.startswith("opt" ): __magic_name__ : List[str] = key.replace("opt" , "language" ) if key.startswith("t5" ): __magic_name__ : Tuple = key.replace("t5" , "language" ) __magic_name__ : Dict = val # read in qv biases read_in_q_v_bias(_snake_case , _snake_case ) __magic_name__ , __magic_name__ : Tuple = hf_model.load_state_dict(_snake_case , strict=_snake_case ) assert len(_snake_case ) == 0 assert unexpected_keys == ["qformer.embeddings.position_ids"] __magic_name__ : List[Any] = load_demo_image() __magic_name__ : Tuple = vis_processors["eval"](_snake_case ).unsqueeze(0 ).to(_snake_case 
) __magic_name__ : Dict = tokenizer(["\n"] , return_tensors="pt" ).input_ids.to(_snake_case ) # create processor __magic_name__ : Optional[Any] = BlipImageProcessor( size={"height": image_size, "width": image_size} , image_mean=_snake_case , image_std=_snake_case ) __magic_name__ : Dict = BlipaProcessor(image_processor=_snake_case , tokenizer=_snake_case ) __magic_name__ : Union[str, Any] = processor(images=_snake_case , return_tensors="pt" ).pixel_values.to(_snake_case ) # make sure processor creates exact same pixel values assert torch.allclose(_snake_case , _snake_case ) original_model.to(_snake_case ) hf_model.to(_snake_case ) with torch.no_grad(): if "opt" in model_name: __magic_name__ : List[Any] = original_model({"image": original_pixel_values, "text_input": [""]} ).logits __magic_name__ : Optional[int] = hf_model(_snake_case , _snake_case ).logits else: __magic_name__ : int = original_model( {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]} ).logits __magic_name__ : Tuple = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 ) __magic_name__ : List[str] = hf_model(_snake_case , _snake_case , labels=_snake_case ).logits assert original_logits.shape == logits.shape print("First values of original logits:" , original_logits[0, :3, :3] ) print("First values of HF logits:" , logits[0, :3, :3] ) # assert values if model_name == "blip2-flan-t5-xl": __magic_name__ : List[str] = torch.tensor( [[-41.5_850, -4.4_440, -8.9_922], [-47.4_322, -5.9_143, -1.7_340]] , device=_snake_case ) assert torch.allclose(logits[0, :3, :3] , _snake_case , atol=1E-4 ) elif model_name == "blip2-flan-t5-xl-coco": __magic_name__ : Tuple = torch.tensor( [[-57.0_109, -9.8_967, -12.6_280], [-68.6_578, -12.7_191, -10.5_065]] , device=_snake_case ) else: # cast to same type __magic_name__ : str = logits.dtype assert torch.allclose(original_logits.to(_snake_case ) , _snake_case , atol=1E-2 ) print("Looks ok!" ) print("Generating a caption..." 
) __magic_name__ : Optional[int] = "" __magic_name__ : Dict = tokenizer(_snake_case , return_tensors="pt" ).input_ids.to(_snake_case ) __magic_name__ : int = original_model.generate({"image": original_pixel_values} ) __magic_name__ : Optional[Any] = hf_model.generate( _snake_case , _snake_case , do_sample=_snake_case , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , ) print("Original generation:" , _snake_case ) __magic_name__ : Tuple = input_ids.shape[1] __magic_name__ : int = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_snake_case ) __magic_name__ : Union[str, Any] = [text.strip() for text in output_text] print("HF generation:" , _snake_case ) if pytorch_dump_folder_path is not None: processor.save_pretrained(_snake_case ) hf_model.save_pretrained(_snake_case ) if push_to_hub: processor.push_to_hub(F'''nielsr/{model_name}''' ) hf_model.push_to_hub(F'''nielsr/{model_name}''' ) if __name__ == "__main__": snake_case : Any = argparse.ArgumentParser() snake_case : Union[str, Any] = [ "blip2-opt-2.7b", "blip2-opt-6.7b", "blip2-opt-2.7b-coco", "blip2-opt-6.7b-coco", "blip2-flan-t5-xl", "blip2-flan-t5-xl-coco", "blip2-flan-t5-xxl", ] parser.add_argument( "--model_name", default="blip2-opt-2.7b", choices=choices, type=str, help="Path to hf config.json of model to convert", ) parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model and processor to the hub after converting", ) snake_case : int = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
281
0
from __future__ import annotations import math def lowerCamelCase_ ( _a : float , _a : int ): '''simple docstring''' UpperCAmelCase_ : str = u for i in range(1 , _A ): UpperCAmelCase_ : List[str] = temp * (u - i) return temp def lowerCamelCase_ ( ): '''simple docstring''' UpperCAmelCase_ : int = int(input("""enter the numbers of values: """ ) ) UpperCAmelCase_ : str = [] for _ in range(_A ): y.append([] ) for i in range(_A ): for j in range(_A ): y[i].append(_A ) UpperCAmelCase_ : List[str] = 0 print("""enter the values of parameters in a list: """ ) UpperCAmelCase_ : Tuple = list(map(_A , input().split() ) ) print("""enter the values of corresponding parameters: """ ) for i in range(_A ): UpperCAmelCase_ : List[str] = float(input() ) UpperCAmelCase_ : List[str] = int(input("""enter the value to interpolate: """ ) ) UpperCAmelCase_ : Tuple = (value - x[0]) / (x[1] - x[0]) # for calculating forward difference table for i in range(1 , _A ): for j in range(n - i ): UpperCAmelCase_ : Any = y[j + 1][i - 1] - y[j][i - 1] UpperCAmelCase_ : List[Any] = y[0][0] for i in range(1 , _A ): summ += (ucal(_A , _A ) * y[0][i]) / math.factorial(_A ) print(F'''the value at {value} is {summ}''' ) if __name__ == "__main__": main()
355
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
    # See all Dinat models at https://huggingface.co/models?filter=dinat
}


class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration class storing the hyper-parameters of a Dinat model.

    Instantiating a configuration with the defaults yields a configuration
    similar to the shi-labs/dinat-mini-in1k-224 architecture.
    """

    model_type = "dinat"

    # map the generic transformer attribute names onto Dinat's own fields
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with
        # VisionEncoderDecoderModel; this indicates the channel dimension
        # after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
59
0
"""PyTorch RegNet model."""

from typing import Optional

import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
    BaseModelOutputWithNoAttention,
    BaseModelOutputWithPoolingAndNoAttention,
    ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]


class RegNetConvLayer(nn.Module):
    """Conv2d -> BatchNorm2d -> activation, the basic RegNet building block."""

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ):
        super().__init__()
        # "same"-style padding for odd kernel sizes; bias is redundant before BatchNorm
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=groups,
            bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetEmbeddings(nn.Module):
    """RegNet stem: a single strided conv that embeds the pixel values."""

    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class RegNetShortCut(nn.Module):
    """1x1 strided conv + BatchNorm used to project the residual when the
    shape of the main branch changes (channels and/or spatial size)."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, hidden_state: Tensor) -> Tensor:
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class RegNetSELayer(nn.Module):
    """Squeeze-and-Excitation layer, see https://arxiv.org/abs/1709.01507."""

    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        # b c h w -> b c 1 1
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state


class RegNetXLayer(nn.Module):
    """RegNet's X layer: a ResNet-style bottleneck with a grouped 3x3 conv
    and reduction = 1."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetYLayer(nn.Module):
    """RegNet's Y layer: an X layer with a Squeeze-and-Excitation block."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetStage(nn.Module):
    """A RegNet stage: ``depth`` stacked X/Y layers; the first layer downsamples."""

    def __init__(
        self,
        config: RegNetConfig,
        in_channels: int,
        out_channels: int,
        stride: int = 2,
        depth: int = 2,
    ):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state


class RegNetEncoder(nn.Module):
    """Stack of RegNet stages; optionally returns all intermediate hidden states."""

    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first
        # stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)


class RegNetPreTrainedModel(PreTrainedModel):
    """Abstract class handling weight initialization and a simple interface
    for downloading and loading pretrained RegNet models."""

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value


REGNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )


@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in
            `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed
            (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            # infer the problem type once from the labels' dtype / label count
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
229
"""Convert a ParlAI Blenderbot checkpoint into the Hugging Face format."""

import argparse

import torch

from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# (parlai substring, hf substring) rename pairs applied in order
PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]


def rename_state_dict_key(k):
    """Translate a single ParlAI state-dict key into its HF equivalent."""
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    """Rename layernorm_embedding -> layer_norm in-place (Blenderbot-3B style)."""
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Copy/rename/modify the ParlAI weights and save them in HF format.

    Args:
        checkpoint_path: path to the ParlAI ``blenderbot-model.bin`` file.
        pytorch_dump_folder_path: output directory for the converted model.
        config_json_path: path to the HF ``config.json`` to build the model from.
    """
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
229
1
"""Command-line entry point converting an original Stable Diffusion
checkpoint into a diffusers pipeline."""

import argparse

import torch

from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
    parser.add_argument(
        "--original_config_file",
        default=None,
        type=str,
        help="The YAML config file corresponding to the original architecture.",
    )
    parser.add_argument(
        "--num_in_channels",
        default=None,
        type=int,
        help="The number of input channels. If `None` number of input channels will be automatically inferred.",
    )
    parser.add_argument(
        "--scheduler_type",
        default="pndm",
        type=str,
        help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
    )
    parser.add_argument(
        "--pipeline_type",
        default=None,
        type=str,
        help=(
            "The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"
            ". If `None` pipeline will be automatically inferred."
        ),
    )
    parser.add_argument(
        "--image_size",
        default=None,
        type=int,
        help=(
            "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
            " Base. Use 768 for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--prediction_type",
        default=None,
        type=str,
        help=(
            "The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"
            " Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--extract_ema",
        action="store_true",
        help=(
            "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
            " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
            " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
        ),
    )
    parser.add_argument(
        "--upcast_attention",
        action="store_true",
        help=(
            "Whether the attention computation should always be upcasted. This is necessary when running stable"
            " diffusion 2.1."
        ),
    )
    parser.add_argument(
        "--from_safetensors",
        action="store_true",
        help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
    )
    parser.add_argument(
        "--to_safetensors",
        action="store_true",
        help="Whether to store pipeline in safetensors format or not.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
    parser.add_argument(
        "--stable_unclip",
        type=str,
        default=None,
        required=False,
        help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.",
    )
    parser.add_argument(
        "--stable_unclip_prior",
        type=str,
        default=None,
        required=False,
        help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
    )
    parser.add_argument(
        "--clip_stats_path",
        type=str,
        help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
        required=False,
    )
    parser.add_argument(
        "--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
    )
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--vae_path",
        type=str,
        default=None,
        required=False,
        help="Set to a path, hub id to an already converted vae to not convert it again.",
    )
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        prediction_type=args.prediction_type,
        model_type=args.pipeline_type,
        extract_ema=args.extract_ema,
        scheduler_type=args.scheduler_type,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        stable_unclip=args.stable_unclip,
        stable_unclip_prior=args.stable_unclip_prior,
        clip_stats_path=args.clip_stats_path,
        controlnet=args.controlnet,
        vae_path=args.vae_path,
    )

    if args.half:
        pipe.to(torch_dtype=torch.float16)

    if args.controlnet:
        # only save the controlnet model
        pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
    else:
        pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
352
"""Integration tests for `Accelerator.gather_for_metrics`: compares metric
values computed on a single process against distributed gathering."""

import math
import os
from copy import deepcopy

import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer

from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed


os.environ.setdefault("TRANSFORMERS_NO_ADVISORY_WARNINGS", "true") if False else None  # noqa: E501  # keep `os` import used
EVAL_MODE = "true"


def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Return (unwrapped model, prepared model, prepared dataloader) for a
    deterministic regression problem."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator: Accelerator, use_longest=False):
    """Build the MRPC validation dataloader used for the metric comparison."""
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)


def get_mrpc_setup(dispatch_batches, split_batches):
    """Prepare baseline ("no") and distributed ("ddp") model/dataloader pairs."""
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    """Run the model over the dataloader and return gathered (logits, targets)."""
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    """Check that gather_for_metrics yields exactly `num_samples` predictions."""
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"


def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    """Compare the MRPC metric computed single-process vs. via distributed gathering."""
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"


def main():
    """Run all gather_for_metrics checks across batching configurations."""
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
345
0
"""simple docstring""" import warnings from ...utils import logging from .image_processing_segformer import SegformerImageProcessor lowerCAmelCase__ = logging.get_logger(__name__) class __snake_case ( _snake_case): def __init__( self : Any , *__lowerCAmelCase : Dict , **__lowerCAmelCase : Optional[Any] ): """simple docstring""" warnings.warn( '''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use SegformerImageProcessor instead.''' , lowercase_ , ) super().__init__(*lowercase_ , **lowercase_ )
72
'''simple docstring''' def UpperCamelCase( UpperCAmelCase_ ): UpperCAmelCase : Optional[Any] = set() # edges = list of graph's edges UpperCAmelCase : str = get_edges(UpperCAmelCase_ ) # While there are still elements in edges list, take an arbitrary edge # (from_node, to_node) and add his extremity to chosen_vertices and then # remove all arcs adjacent to the from_node and to_node while edges: UpperCAmelCase , UpperCAmelCase : Union[str, Any] = edges.pop() chosen_vertices.add(UpperCAmelCase_ ) chosen_vertices.add(UpperCAmelCase_ ) for edge in edges.copy(): if from_node in edge or to_node in edge: edges.discard(UpperCAmelCase_ ) return chosen_vertices def UpperCamelCase( UpperCAmelCase_ ): UpperCAmelCase : List[str] = set() for from_node, to_nodes in graph.items(): for to_node in to_nodes: edges.add((from_node, to_node) ) return edges if __name__ == "__main__": import doctest doctest.testmod() # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]} # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
151
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __UpperCAmelCase = { 'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'GraphormerForGraphClassification', 'GraphormerModel', 'GraphormerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_graphormer import ( GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST, GraphormerForGraphClassification, GraphormerModel, GraphormerPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
361
import warnings from ...utils import logging from .image_processing_glpn import GLPNImageProcessor __UpperCAmelCase = logging.get_logger(__name__) class __a ( __UpperCamelCase ): def __init__( self : Union[str, Any] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : Dict ): warnings.warn( """The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use GLPNImageProcessor instead.""" , UpperCAmelCase , ) super().__init__(*UpperCAmelCase , **UpperCAmelCase )
28
0
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING snake_case__ : Tuple = logging.get_logger(__name__) snake_case__ : Any = { '''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''', # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr } class snake_case_( a__ ): __UpperCamelCase = '''deformable_detr''' __UpperCamelCase = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', } def __init__( self : str , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : List[Any]=3 , UpperCamelCase_ : List[Any]=3_0_0 , UpperCamelCase_ : int=1_0_2_4 , UpperCamelCase_ : Any=6 , UpperCamelCase_ : Optional[Any]=1_0_2_4 , UpperCamelCase_ : Optional[Any]=8 , UpperCamelCase_ : List[Any]=6 , UpperCamelCase_ : Any=1_0_2_4 , UpperCamelCase_ : List[str]=8 , UpperCamelCase_ : int=0.0 , UpperCamelCase_ : int=True , UpperCamelCase_ : Union[str, Any]="relu" , UpperCamelCase_ : int=2_5_6 , UpperCamelCase_ : Optional[Any]=0.1 , UpperCamelCase_ : List[str]=0.0 , UpperCamelCase_ : List[Any]=0.0 , UpperCamelCase_ : Optional[Any]=0.02 , UpperCamelCase_ : List[Any]=1.0 , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : List[Any]=False , UpperCamelCase_ : List[str]="sine" , UpperCamelCase_ : Union[str, Any]="resnet50" , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Union[str, Any]=False , UpperCamelCase_ : Optional[int]=4 , UpperCamelCase_ : int=4 , UpperCamelCase_ : List[str]=4 , UpperCamelCase_ : Dict=False , UpperCamelCase_ : str=3_0_0 , UpperCamelCase_ : List[str]=False , UpperCamelCase_ : Union[str, Any]=1 , UpperCamelCase_ : Any=5 , UpperCamelCase_ : Union[str, Any]=2 , UpperCamelCase_ : Any=1 , UpperCamelCase_ : Dict=1 , UpperCamelCase_ : Any=5 , UpperCamelCase_ : Tuple=2 , UpperCamelCase_ : Union[str, 
Any]=0.1 , UpperCamelCase_ : Any=0.25 , UpperCamelCase_ : Optional[Any]=False , **UpperCamelCase_ : Tuple , ): if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) lowerCAmelCase : List[Any] = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(__a , __a ): lowerCAmelCase : List[str] = backbone_config.get('''model_type''' ) lowerCAmelCase : List[Any] = CONFIG_MAPPING[backbone_model_type] lowerCAmelCase : Optional[int] = config_class.from_dict(__a ) lowerCAmelCase : Optional[int] = use_timm_backbone lowerCAmelCase : Tuple = backbone_config lowerCAmelCase : Optional[Any] = num_channels lowerCAmelCase : Any = num_queries lowerCAmelCase : int = max_position_embeddings lowerCAmelCase : Optional[Any] = d_model lowerCAmelCase : Any = encoder_ffn_dim lowerCAmelCase : int = encoder_layers lowerCAmelCase : Union[str, Any] = encoder_attention_heads lowerCAmelCase : Union[str, Any] = decoder_ffn_dim lowerCAmelCase : int = decoder_layers lowerCAmelCase : str = decoder_attention_heads lowerCAmelCase : Optional[int] = dropout lowerCAmelCase : int = attention_dropout lowerCAmelCase : Dict = activation_dropout lowerCAmelCase : str = activation_function lowerCAmelCase : List[Any] = init_std lowerCAmelCase : List[Any] = init_xavier_std lowerCAmelCase : Union[str, Any] = encoder_layerdrop lowerCAmelCase : Optional[Any] = auxiliary_loss lowerCAmelCase : str = position_embedding_type lowerCAmelCase : Optional[int] = backbone lowerCAmelCase : Any = use_pretrained_backbone lowerCAmelCase : List[Any] = dilation # deformable attributes lowerCAmelCase : Tuple = num_feature_levels lowerCAmelCase : Union[str, Any] = encoder_n_points lowerCAmelCase : str = decoder_n_points lowerCAmelCase : Tuple = two_stage 
lowerCAmelCase : List[Any] = two_stage_num_proposals lowerCAmelCase : Any = with_box_refine if two_stage is True and with_box_refine is False: raise ValueError('''If two_stage is True, with_box_refine must be True.''' ) # Hungarian matcher lowerCAmelCase : List[str] = class_cost lowerCAmelCase : int = bbox_cost lowerCAmelCase : List[Any] = giou_cost # Loss coefficients lowerCAmelCase : Tuple = mask_loss_coefficient lowerCAmelCase : Tuple = dice_loss_coefficient lowerCAmelCase : List[Any] = bbox_loss_coefficient lowerCAmelCase : List[Any] = giou_loss_coefficient lowerCAmelCase : Optional[int] = eos_coefficient lowerCAmelCase : Any = focal_alpha lowerCAmelCase : List[Any] = disable_custom_kernels super().__init__(is_encoder_decoder=__a , **__a ) @property def lowerCamelCase__ ( self : List[Any] ): return self.encoder_attention_heads @property def lowerCamelCase__ ( self : Any ): return self.d_model def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : List[Any] = copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: lowerCAmelCase : List[str] = self.backbone_config.to_dict() lowerCAmelCase : Tuple = self.__class__.model_type return output
60
"""simple docstring""" def lowerCamelCase__ ( __snake_case ) -> bool: """simple docstring""" return credit_card_number.startswith(('''34''', '''35''', '''37''', '''4''', '''5''', '''6''') ) def lowerCamelCase__ ( __snake_case ) -> bool: """simple docstring""" _UpperCamelCase = credit_card_number _UpperCamelCase = 0 _UpperCamelCase = len(__snake_case ) - 2 for i in range(__snake_case, -1, -2 ): # double the value of every second digit _UpperCamelCase = int(cc_number[i] ) digit *= 2 # If doubling of a number results in a two digit number # i.e greater than 9(e.g., 6 × 2 = 12), # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6), # to get a single digit number. if digit > 9: digit %= 10 digit += 1 _UpperCamelCase = cc_number[:i] + str(__snake_case ) + cc_number[i + 1 :] total += digit # Sum up the remaining digits for i in range(len(__snake_case ) - 1, -1, -2 ): total += int(cc_number[i] ) return total % 10 == 0 def lowerCamelCase__ ( __snake_case ) -> bool: """simple docstring""" _UpperCamelCase = F'''{credit_card_number} is an invalid credit card number because''' if not credit_card_number.isdigit(): print(F'''{error_message} it has nonnumerical characters.''' ) return False if not 13 <= len(__snake_case ) <= 16: print(F'''{error_message} of its length.''' ) return False if not validate_initial_digits(__snake_case ): print(F'''{error_message} of its first two digits.''' ) return False if not luhn_validation(__snake_case ): print(F'''{error_message} it fails the Luhn check.''' ) return False print(F'''{credit_card_number} is a valid credit card number.''' ) return True if __name__ == "__main__": import doctest doctest.testmod() validate_credit_card_number("""4111111111111111""") validate_credit_card_number("""32323""")
194
0
import inspect import math import tempfile import unittest import numpy as np from transformers import ViTMAEConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMAEForPreTraining, ViTMAEModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCAmelCase_ : '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=30 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=0.6 , __SCREAMING_SNAKE_CASE=None , ): """simple docstring""" UpperCamelCase : Tuple = parent UpperCamelCase : Dict = batch_size UpperCamelCase : List[str] = image_size UpperCamelCase : Any = patch_size UpperCamelCase : Optional[Any] = num_channels UpperCamelCase : Union[str, Any] = is_training UpperCamelCase : Optional[int] = use_labels UpperCamelCase : str = hidden_size UpperCamelCase : Any = num_hidden_layers UpperCamelCase : int = num_attention_heads UpperCamelCase : Union[str, Any] = intermediate_size UpperCamelCase : int = hidden_act UpperCamelCase : Union[str, Any] = hidden_dropout_prob UpperCamelCase : Optional[Any] = 
attention_probs_dropout_prob UpperCamelCase : int = type_sequence_label_size UpperCamelCase : Tuple = initializer_range UpperCamelCase : List[Any] = mask_ratio UpperCamelCase : Dict = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) UpperCamelCase : Any = (image_size // patch_size) ** 2 UpperCamelCase : Optional[Any] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase : Any = None if self.use_labels: UpperCamelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase : Optional[int] = self.get_config() return config, pixel_values, labels def _lowercase ( self ): """simple docstring""" return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : int = ViTMAEModel(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase : Dict = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase : List[str] = 
ViTMAEForPreTraining(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase : Dict = model(__SCREAMING_SNAKE_CASE ) UpperCamelCase : str = (self.image_size // self.patch_size) ** 2 UpperCamelCase : Dict = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images UpperCamelCase : str = 1 UpperCamelCase : int = ViTMAEForPreTraining(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() UpperCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCamelCase : str = model(__SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Dict = self.prepare_config_and_inputs() UpperCamelCase : List[Any] = config_and_inputs UpperCamelCase : Dict = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase_ ( _a, _a, unittest.TestCase): '''simple docstring''' __UpperCamelCase : int = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else () __UpperCamelCase : Optional[int] = {"feature-extraction": ViTMAEModel} if is_torch_available() else {} __UpperCamelCase : Tuple = False __UpperCamelCase : Any = False __UpperCamelCase : Union[str, Any] = False __UpperCamelCase : Tuple = False def _lowercase ( self ): """simple docstring""" UpperCamelCase : int = ViTMAEModelTester(self ) UpperCamelCase : str = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=37 ) def _lowercase ( self ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='''ViTMAE does not use inputs_embeds''' ) def _lowercase ( self ): """simple docstring""" pass def _lowercase ( self ): """simple docstring""" 
UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase : Optional[Any] = model_class(__SCREAMING_SNAKE_CASE ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCamelCase : Union[str, Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase : Tuple = model_class(__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase : Tuple = [*signature.parameters.keys()] UpperCamelCase : Dict = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__SCREAMING_SNAKE_CASE ) def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" np.random.seed(2 ) UpperCamelCase : Optional[Any] = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 ) UpperCamelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) UpperCamelCase : Any = torch.from_numpy(__SCREAMING_SNAKE_CASE ) # Add `noise` argument. 
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument UpperCamelCase : Tuple = pt_noise super().check_pt_tf_models(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _lowercase ( self ): """simple docstring""" UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase : Union[str, Any] = model_class(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): UpperCamelCase : int = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) UpperCamelCase : Dict = outputs[0].cpu().numpy() UpperCamelCase : Optional[Any] = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__SCREAMING_SNAKE_CASE ) UpperCamelCase : List[Any] = model_class.from_pretrained(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): UpperCamelCase : Optional[Any] = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) # Make sure we don't have nans UpperCamelCase : str = after_outputs[0].cpu().numpy() UpperCamelCase : List[str] = 0 UpperCamelCase : Dict = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__SCREAMING_SNAKE_CASE , 1e-5 ) @unittest.skip( reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''' ) def _lowercase ( self ): """simple docstring""" pass @unittest.skip( reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''' ) def _lowercase ( self ): """simple docstring""" pass @unittest.skip( reason='''ViTMAE returns a random mask + ids_restore in each forward pass. 
See test_save_load to get deterministic results.''' ) def _lowercase ( self ): """simple docstring""" pass @unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' ) def _lowercase ( self ): """simple docstring""" pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def _lowercase ( self ): """simple docstring""" pass @slow def _lowercase ( self ): """simple docstring""" for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase : Any = ViTMAEModel.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) def a ( ): """simple docstring""" UpperCamelCase : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class UpperCAmelCase_ ( unittest.TestCase): '''simple docstring''' @cached_property def _lowercase ( self ): """simple docstring""" return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None @slow def _lowercase ( self ): """simple docstring""" np.random.seed(2 ) UpperCamelCase : int = ViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' ).to(__SCREAMING_SNAKE_CASE ) UpperCamelCase : str = self.default_image_processor UpperCamelCase : str = prepare_img() UpperCamelCase : Optional[int] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) UpperCamelCase : int = ViTMAEConfig() UpperCamelCase : Any = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) UpperCamelCase : Union[str, Any] = np.random.uniform(size=(1, num_patches) ) # forward pass with torch.no_grad(): UpperCamelCase : Optional[Any] = model(**__SCREAMING_SNAKE_CASE , noise=torch.from_numpy(__SCREAMING_SNAKE_CASE 
).to(device=__SCREAMING_SNAKE_CASE ) ) # verify the logits UpperCamelCase : Dict = torch.Size((1, 196, 768) ) self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE ) UpperCamelCase : int = torch.tensor( [[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(__SCREAMING_SNAKE_CASE ) , atol=1e-4 ) )
364
def a ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ): """simple docstring""" UpperCamelCase : Any = set() # Replace all the whitespace in our sentence UpperCamelCase : Union[str, Any] = input_str.replace(''' ''' , '''''' ) for alpha in input_str: if "a" <= alpha.lower() <= "z": frequency.add(alpha.lower() ) return len(SCREAMING_SNAKE_CASE_ ) == 2_6 def a ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ): """simple docstring""" UpperCamelCase : str = [False] * 2_6 for char in input_str: if char.islower(): UpperCamelCase : List[Any] = True elif char.isupper(): UpperCamelCase : List[Any] = True return all(SCREAMING_SNAKE_CASE_ ) def a ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ): """simple docstring""" return len({char for char in input_str.lower() if char.isalpha()} ) == 2_6 def a ( ): """simple docstring""" from timeit import timeit UpperCamelCase : int = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest''' print(timeit('''is_pangram()''' , setup=SCREAMING_SNAKE_CASE_ ) ) print(timeit('''is_pangram_faster()''' , setup=SCREAMING_SNAKE_CASE_ ) ) print(timeit('''is_pangram_fastest()''' , setup=SCREAMING_SNAKE_CASE_ ) ) # 5.348480500048026, 2.6477354579837993, 1.8470395830227062 # 5.036091582966037, 2.644472333951853, 1.8869528750656173 if __name__ == "__main__": import doctest doctest.testmod() benchmark()
315
0
"""simple docstring""" from __future__ import annotations _SCREAMING_SNAKE_CASE : int = list[tuple[int, int]] _SCREAMING_SNAKE_CASE : str = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] _SCREAMING_SNAKE_CASE : Union[str, Any] = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right class a : def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : Node | None , ) -> int: lowerCamelCase_ = pos_x lowerCamelCase_ = pos_y lowerCamelCase_ = (pos_y, pos_x) lowerCamelCase_ = goal_x lowerCamelCase_ = goal_y lowerCamelCase_ = g_cost lowerCamelCase_ = parent lowerCamelCase_ = self.calculate_heuristic() def UpperCamelCase ( self : str ) -> float: lowerCamelCase_ = abs(self.pos_x - self.goal_x ) lowerCamelCase_ = abs(self.pos_y - self.goal_y ) return dx + dy def __lt__( self : int , __SCREAMING_SNAKE_CASE : int ) -> bool: return self.f_cost < other.f_cost class a : def __init__( self : str , __SCREAMING_SNAKE_CASE : tuple[int, int] , __SCREAMING_SNAKE_CASE : tuple[int, int] ) -> Optional[Any]: lowerCamelCase_ = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , __SCREAMING_SNAKE_CASE ) lowerCamelCase_ = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99999 , __SCREAMING_SNAKE_CASE ) lowerCamelCase_ = [self.start] lowerCamelCase_ = [] lowerCamelCase_ = False def UpperCamelCase ( self : List[Any] ) -> Path | None: while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() lowerCamelCase_ = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: lowerCamelCase_ = True return self.retrace_path(__SCREAMING_SNAKE_CASE ) self.closed_nodes.append(__SCREAMING_SNAKE_CASE ) lowerCamelCase_ = self.get_successors(__SCREAMING_SNAKE_CASE 
) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(__SCREAMING_SNAKE_CASE ) else: # retrieve the best current path lowerCamelCase_ = self.open_nodes.pop(self.open_nodes.index(__SCREAMING_SNAKE_CASE ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(__SCREAMING_SNAKE_CASE ) else: self.open_nodes.append(__SCREAMING_SNAKE_CASE ) if not self.reached: return [self.start.pos] return None def UpperCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Node ) -> list[Node]: lowerCamelCase_ = [] for action in delta: lowerCamelCase_ = parent.pos_x + action[1] lowerCamelCase_ = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__SCREAMING_SNAKE_CASE ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , __SCREAMING_SNAKE_CASE , ) ) return successors def UpperCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Node | None ) -> Path: lowerCamelCase_ = node lowerCamelCase_ = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) lowerCamelCase_ = current_node.parent path.reverse() return path if __name__ == "__main__": _SCREAMING_SNAKE_CASE : Dict = (0, 0) _SCREAMING_SNAKE_CASE : Union[str, Any] = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) print('''------''') _SCREAMING_SNAKE_CASE : Optional[int] = GreedyBestFirst(init, goal) _SCREAMING_SNAKE_CASE : Dict = greedy_bf.search() if path: for pos_x, pos_y in path: _SCREAMING_SNAKE_CASE : Tuple = 2 for elem in grid: print(elem)
183
"""simple docstring""" import argparse import json import subprocess def lowerCamelCase__ ( _lowerCamelCase : Tuple , _lowerCamelCase : str ) -> List[Any]: lowerCamelCase_ = [] lowerCamelCase_ = ( F'''curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"''' ' https://api.github.com/repos/huggingface/transformers/actions/runners' ) lowerCamelCase_ = subprocess.run(_lowerCamelCase , shell=_lowerCamelCase , stdout=subprocess.PIPE ) lowerCamelCase_ = output.stdout.decode('utf-8' ) lowerCamelCase_ = json.loads(_lowerCamelCase ) lowerCamelCase_ = status['runners'] for runner in runners: if runner["name"] in target_runners: if runner["status"] == "offline": offline_runners.append(_lowerCamelCase ) # save the result so we can report them on Slack with open('offline_runners.txt' , 'w' ) as fp: fp.write(json.dumps(_lowerCamelCase ) ) if len(_lowerCamelCase ) > 0: lowerCamelCase_ = '\n'.join([x['name'] for x in offline_runners] ) raise ValueError(F'''The following runners are offline:\n{failed}''' ) if __name__ == "__main__": def lowerCamelCase__ ( _lowerCamelCase : Dict ) -> Tuple: return values.split(',' ) _SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--target_runners''', default=None, type=list_str, required=True, help='''Comma-separated list of runners to check status.''', ) parser.add_argument( '''--token''', default=None, type=str, required=True, help='''A token that has actions:read permission.''' ) _SCREAMING_SNAKE_CASE : Any = parser.parse_args() get_runner_status(args.target_runners, args.token)
183
1
"""simple docstring""" def _snake_case ( lowercase__ : int , lowercase__ : int ) -> str: '''simple docstring''' if a < 0 or b < 0: raise ValueError("""the value of both inputs must be positive""" ) lowerCAmelCase_ :Union[str, Any] = str(bin(lowercase__ ) )[2:] # remove the leading "0b" lowerCAmelCase_ :Dict = str(bin(lowercase__ ) )[2:] lowerCAmelCase_ :List[Any] = max(len(lowercase__ ) , len(lowercase__ ) ) return "0b" + "".join( str(int("""1""" in (char_a, char_b) ) ) for char_a, char_b in zip(a_binary.zfill(lowercase__ ) , b_binary.zfill(lowercase__ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
1
"""simple docstring""" import pandas as pd from matplotlib import pyplot as plt from sklearn.linear_model import LinearRegression # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split # Fitting Polynomial Regression to the dataset from sklearn.preprocessing import PolynomialFeatures # Importing the dataset __UpperCAmelCase = pd.read_csv( 'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/' 'position_salaries.csv' ) __UpperCAmelCase = dataset.iloc[:, 1:2].values __UpperCAmelCase = dataset.iloc[:, 2].values __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = train_test_split(X, y, test_size=0.2, random_state=0) __UpperCAmelCase = PolynomialFeatures(degree=4) __UpperCAmelCase = poly_reg.fit_transform(X) __UpperCAmelCase = LinearRegression() pol_reg.fit(X_poly, y) def _snake_case ( ) -> str: '''simple docstring''' plt.scatter(lowercase__ , lowercase__ , color="""red""" ) plt.plot(lowercase__ , pol_reg.predict(poly_reg.fit_transform(lowercase__ ) ) , color="""blue""" ) plt.title("""Truth or Bluff (Linear Regression)""" ) plt.xlabel("""Position level""" ) plt.ylabel("""Salary""" ) plt.show() if __name__ == "__main__": viz_polymonial() # Predicting a new result with Polymonial Regression pol_reg.predict(poly_reg.fit_transform([[5.5]])) # output should be 132148.43750003
1
1
"""simple docstring""" from __future__ import annotations _lowercase = 1.6021e-19 # units = C def _snake_case ( snake_case__ : float , snake_case__ : float , snake_case__ : float , ): if (conductivity, electron_conc, mobility).count(0 ) != 1: raise ValueError('You cannot supply more or less than 2 values' ) elif conductivity < 0: raise ValueError('Conductivity cannot be negative' ) elif electron_conc < 0: raise ValueError('Electron concentration cannot be negative' ) elif mobility < 0: raise ValueError('mobility cannot be negative' ) elif conductivity == 0: return ( "conductivity", mobility * electron_conc * ELECTRON_CHARGE, ) elif electron_conc == 0: return ( "electron_conc", conductivity / (mobility * ELECTRON_CHARGE), ) else: return ( "mobility", conductivity / (electron_conc * ELECTRON_CHARGE), ) if __name__ == "__main__": import doctest doctest.testmod()
74
"""simple docstring""" from __future__ import annotations import math _lowercase = '''2020.9.26''' _lowercase = '''xcodz-dot, cclaus, dhruvmanila''' def _snake_case ( snake_case__ : float , snake_case__ : float , snake_case__ : float , snake_case__ : float , snake_case__ : float ): if not all(isinstance(snake_case__ , (float, int) ) for val in locals().values() ): A = F'Input values must either be float or int: {list(locals().values() )}' raise TypeError(snake_case__ ) A = ((x * distance) / (z + distance)) * scale A = ((y * distance) / (z + distance)) * scale return projected_x, projected_y def _snake_case ( snake_case__ : float , snake_case__ : float , snake_case__ : float , snake_case__ : str , snake_case__ : float ): if not isinstance(snake_case__ , snake_case__ ): raise TypeError('Axis must be a str' ) A = locals() del input_variables["axis"] if not all(isinstance(snake_case__ , (float, int) ) for val in input_variables.values() ): A = ( 'Input values except axis must either be float or int: ' F'{list(input_variables.values() )}' ) raise TypeError(snake_case__ ) A = (angle % 360) / 450 * 180 / math.pi if axis == "z": A = x * math.cos(snake_case__ ) - y * math.sin(snake_case__ ) A = y * math.cos(snake_case__ ) + x * math.sin(snake_case__ ) A = z elif axis == "x": A = y * math.cos(snake_case__ ) - z * math.sin(snake_case__ ) A = z * math.cos(snake_case__ ) + y * math.sin(snake_case__ ) A = x elif axis == "y": A = x * math.cos(snake_case__ ) - z * math.sin(snake_case__ ) A = z * math.cos(snake_case__ ) + x * math.sin(snake_case__ ) A = y else: raise ValueError('not a valid axis, choose one of \'x\', \'y\', \'z\'' ) return new_x, new_y, new_z if __name__ == "__main__": import doctest doctest.testmod() print(F"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""") print(F"""{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }""")
74
1
"""Project Euler problem 20: sum of the digits of 100!."""
from math import factorial


def solution(num: int = 100) -> int:
    """Return the sum of the decimal digits of ``num``! (default: 100!)."""
    # Each character of the stringified factorial is one decimal digit.
    return sum(int(digit) for digit in str(factorial(num)))


if __name__ == "__main__":
    print(solution(int(input('Enter the Number: ').strip())))
205
"""Project Euler problem 20: sum of the digits of 100! (manual factorial)."""


def factorial(num: int) -> int:
    """Return ``num``! computed iteratively."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Return the sum of the decimal digits of ``number``."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Return the digit sum of ``num``! (default: 100!)."""
    fact = factorial(num)
    result = split_and_add(fact)
    return result


if __name__ == "__main__":
    print(solution(int(input('Enter the Number: ').strip())))
205
1
"""Fine-tune a transformer for token classification (NER/POS) with PyTorch Lightning."""
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module

import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask


logger = logging.getLogger(__name__)


class NERTransformer(BaseTransformer):
    """A training module for token classification. See BaseTransformer for the core options."""

    # Passed to BaseTransformer.__init__ as the task mode.
    mode = "token-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
            )
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        # ignore_index marks padded label positions that must not contribute to the loss.
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_num):
        """Run one training batch and return its loss."""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}

    def prepare_data(self):
        """Build (or load cached) features for all dataset splits."""
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples,
                    self.labels,
                    args.max_seq_length,
                    self.tokenizer,
                    cls_token_at_end=bool(self.config.model_type in ["xlnet"]),
                    cls_token=self.tokenizer.cls_token,
                    cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0,
                    sep_token=self.tokenizer.sep_token,
                    # NOTE(review): this argument was mangled in the original source;
                    # False matches single-sep models — confirm for roberta-style models.
                    sep_token_extra=False,
                    pad_on_left=bool(self.config.model_type in ["xlnet"]),
                    pad_token=self.tokenizer.pad_token_id,
                    pad_token_segment_id=self.tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode, batch_size, shuffle=False):
        """Load cached features for ``mode`` and wrap them in a DataLoader."""
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            # HACK(we will not use this anymore soon)
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids),
            batch_size=batch_size,
        )

    def validation_step(self, batch, batch_nb):
        """Compute validation loss and raw predictions for one batch."""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        """Aggregate per-batch outputs into seqeval metrics over label sequences."""
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                # Skip padded positions, marked with the loss ignore_index.
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }
        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs):
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        ret, predictions, targets = self._eval_end(outputs)
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        """Register task-specific CLI flags on top of the BaseTransformer ones."""
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)"
        )
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--labels",
            default="",
            type=str,
            help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)

    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
52
A_ :str = '''0.21.0''' from .accelerator import Accelerator from .big_modeling import ( cpu_offload, cpu_offload_with_hook, disk_offload, dispatch_model, init_empty_weights, init_on_device, load_checkpoint_and_dispatch, ) from .data_loader import skip_first_batches from .launchers import debug_launcher, notebook_launcher from .state import PartialState from .utils import ( DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, FullyShardedDataParallelPlugin, GradScalerKwargs, InitProcessGroupKwargs, find_executable_batch_size, infer_auto_device_map, is_rich_available, load_checkpoint_in_model, synchronize_rng_states, ) if is_rich_available(): from .utils import rich
71
0
"""SentencePiece-based tokenizer for Reformer ("Crime and Punishment") checkpoints."""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524_288,
}


# NOTE(review): the class name was mangled in the original source; presumably
# this is ReformerTokenizer — kept as-is to avoid breaking unseen callers.
class snake_case__(PreTrainedTokenizer):
    """Tokenizer backed by a SentencePiece model file (``spiece.model``)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],  # noqa: B006 — mutable default kept for parity; never mutated here
        sp_model_kwargs=None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        """Return token -> id for the SentencePiece vocab plus added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePieceProcessor is not picklable; drop it and reload on unpickle.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text):
        """Split ``text`` into SentencePiece tokens (strings)."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        # NOTE(review): out-of-range ids implicitly return None, preserved from the original.
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Decode a token sequence, passing special tokens through verbatim."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Copy (or serialize) the SentencePiece model into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
357
"""Lazy-loading package init for the TAPAS model family."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Map of submodule name -> public names, consumed lazily by _LazyModule below.
_import_structure = {
    "configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
    "tokenization_tapas": ["TapasTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tapas"] = [
        "TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TapasForMaskedLM",
        "TapasForQuestionAnswering",
        "TapasForSequenceClassification",
        "TapasModel",
        "TapasPreTrainedModel",
        "load_tf_weights_in_tapas",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_tapas"] = [
        "TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFTapasForMaskedLM",
        "TFTapasForQuestionAnswering",
        "TFTapasForSequenceClassification",
        "TFTapasModel",
        "TFTapasPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
    from .tokenization_tapas import TapasTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tapas import (
            TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TapasForMaskedLM,
            TapasForQuestionAnswering,
            TapasForSequenceClassification,
            TapasModel,
            TapasPreTrainedModel,
            load_tf_weights_in_tapas,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_tapas import (
            TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFTapasForMaskedLM,
            TFTapasForQuestionAnswering,
            TFTapasForSequenceClassification,
            TFTapasModel,
            TFTapasPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
138
0
# NOTE(review): this test module appears to have gone through an automated
# identifier-mangling pass: the fixture path is bound to `__lowerCamelCase` but
# methods read the undefined `_lowercase`, every test method is named `__a`
# (so later defs shadow earlier ones), and the mixin base `lowerCamelCase_` is
# undefined (presumably TokenizerTesterMixin). Code is kept token-identical;
# only comments/docstrings were added — verify the names against the upstream
# test file before running.
import pickle
import shutil
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


# Path to the small SentencePiece fixture used by the unit tests.
__lowerCamelCase : int = get_tests_dir('''fixtures/test_sentencepiece.model''')


@require_sentencepiece
@require_tokenizers
class __snake_case ( lowerCamelCase_ , unittest.TestCase ):
    # Tokenizer classes under test (slow + fast) and common-test switches.
    lowerCAmelCase_ = XLMRobertaTokenizer
    lowerCAmelCase_ = XLMRobertaTokenizerFast
    lowerCAmelCase_ = True
    lowerCAmelCase_ = True

    def __a ( self : int ):
        """Build a slow tokenizer from the fixture and save it to the tmp dir."""
        super().setUp()

        # We have a SentencePiece fixture for testing
        # NOTE(review): result bound to a dead name; `tokenizer` below is undefined.
        SCREAMING_SNAKE_CASE__ = XLMRobertaTokenizer(_lowercase , keep_accents=_lowercase )
        tokenizer.save_pretrained(self.tmpdirname )

    def __a ( self : Union[str, Any] ):
        """<pad> must round-trip through token<->id conversion as id 1."""
        SCREAMING_SNAKE_CASE__ = """<pad>"""
        SCREAMING_SNAKE_CASE__ = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase ) , _lowercase )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase ) , _lowercase )

    def __a ( self : int ):
        """First/last vocab entries and total vocab size of the fixture."""
        SCREAMING_SNAKE_CASE__ = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] , """<s>""" )
        self.assertEqual(vocab_keys[1] , """<pad>""" )
        self.assertEqual(vocab_keys[-1] , """<mask>""" )
        self.assertEqual(len(_lowercase ) , 10_02 )

    def __a ( self : List[Any] ):
        """vocab_size matches the fixture model."""
        self.assertEqual(self.get_tokenizer().vocab_size , 10_02 )

    def __a ( self : Dict ):
        """End-to-end tokenize / ids / back-to-tokens round trip on the fixture."""
        SCREAMING_SNAKE_CASE__ = XLMRobertaTokenizer(_lowercase , keep_accents=_lowercase )

        SCREAMING_SNAKE_CASE__ = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(_lowercase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(_lowercase ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )

        SCREAMING_SNAKE_CASE__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            _lowercase , [
                SPIECE_UNDERLINE + """I""",
                SPIECE_UNDERLINE + """was""",
                SPIECE_UNDERLINE + """b""",
                """or""",
                """n""",
                SPIECE_UNDERLINE + """in""",
                SPIECE_UNDERLINE + """""",
                """9""",
                """2""",
                """0""",
                """0""",
                """0""",
                """,""",
                SPIECE_UNDERLINE + """and""",
                SPIECE_UNDERLINE + """this""",
                SPIECE_UNDERLINE + """is""",
                SPIECE_UNDERLINE + """f""",
                """al""",
                """s""",
                """é""",
                """.""",
            ] , )
        SCREAMING_SNAKE_CASE__ = tokenizer.convert_tokens_to_ids(_lowercase )
        self.assertListEqual(
            _lowercase , [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
            ] , )

        SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(_lowercase )
        self.assertListEqual(
            _lowercase , [
                SPIECE_UNDERLINE + """I""",
                SPIECE_UNDERLINE + """was""",
                SPIECE_UNDERLINE + """b""",
                """or""",
                """n""",
                SPIECE_UNDERLINE + """in""",
                SPIECE_UNDERLINE + """""",
                """<unk>""",
                """2""",
                """0""",
                """0""",
                """0""",
                """,""",
                SPIECE_UNDERLINE + """and""",
                SPIECE_UNDERLINE + """this""",
                SPIECE_UNDERLINE + """is""",
                SPIECE_UNDERLINE + """f""",
                """al""",
                """s""",
                """<unk>""",
                """.""",
            ] , )

    def __a ( self : Dict ):
        """Slow and fast tokenizers must save/load interchangeably (all formats)."""
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        SCREAMING_SNAKE_CASE__ = (self.rust_tokenizer_class, """hf-internal-testing/tiny-xlm-roberta""", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                SCREAMING_SNAKE_CASE__ = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase )
                SCREAMING_SNAKE_CASE__ = self.tokenizer_class.from_pretrained(_lowercase , **_lowercase )

                SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp()

                SCREAMING_SNAKE_CASE__ = tokenizer_r.save_pretrained(_lowercase )
                SCREAMING_SNAKE_CASE__ = tokenizer_p.save_pretrained(_lowercase )

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
                SCREAMING_SNAKE_CASE__ = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
                self.assertSequenceEqual(_lowercase , _lowercase )

                # Checks everything loads correctly in the same way
                SCREAMING_SNAKE_CASE__ = tokenizer_r.from_pretrained(_lowercase )
                SCREAMING_SNAKE_CASE__ = tokenizer_p.from_pretrained(_lowercase )

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(_lowercase , _lowercase ) )
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(_lowercase )

                # Save tokenizer rust, legacy_format=True
                SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp()

                SCREAMING_SNAKE_CASE__ = tokenizer_r.save_pretrained(_lowercase , legacy_format=_lowercase )
                SCREAMING_SNAKE_CASE__ = tokenizer_p.save_pretrained(_lowercase )

                # Checks it save with the same files
                self.assertSequenceEqual(_lowercase , _lowercase )

                # Checks everything loads correctly in the same way
                SCREAMING_SNAKE_CASE__ = tokenizer_r.from_pretrained(_lowercase )
                SCREAMING_SNAKE_CASE__ = tokenizer_p.from_pretrained(_lowercase )

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(_lowercase , _lowercase ) )

                shutil.rmtree(_lowercase )

                # Save tokenizer rust, legacy_format=False
                SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp()

                SCREAMING_SNAKE_CASE__ = tokenizer_r.save_pretrained(_lowercase , legacy_format=_lowercase )
                SCREAMING_SNAKE_CASE__ = tokenizer_p.save_pretrained(_lowercase )

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )

                # Checks everything loads correctly in the same way
                SCREAMING_SNAKE_CASE__ = tokenizer_r.from_pretrained(_lowercase )
                SCREAMING_SNAKE_CASE__ = tokenizer_p.from_pretrained(_lowercase )

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(_lowercase , _lowercase ) )

                shutil.rmtree(_lowercase )

    @cached_property
    def __a ( self : Optional[Any] ):
        """Full pretrained tokenizer used by the @slow integration tests."""
        return XLMRobertaTokenizer.from_pretrained("""xlm-roberta-base""" )

    def __a ( self : List[str] ):
        """A tokenizer built from a copied model file must survive pickling."""
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(_lowercase , f.name )
            SCREAMING_SNAKE_CASE__ = XLMRobertaTokenizer(f.name , keep_accents=_lowercase )
            SCREAMING_SNAKE_CASE__ = pickle.dumps(_lowercase )
        pickle.loads(_lowercase )

    def __a ( self : Dict ):
        """Slow and fast tokenizers must agree on tokens and encoded ids."""
        if not self.test_rust_tokenizer:
            return

        SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
        SCREAMING_SNAKE_CASE__ = self.get_rust_tokenizer()

        SCREAMING_SNAKE_CASE__ = """I was born in 92000, and this is falsé."""

        SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(_lowercase )
        SCREAMING_SNAKE_CASE__ = rust_tokenizer.tokenize(_lowercase )
        self.assertListEqual(_lowercase , _lowercase )

        SCREAMING_SNAKE_CASE__ = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
        SCREAMING_SNAKE_CASE__ = rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
        self.assertListEqual(_lowercase , _lowercase )

        SCREAMING_SNAKE_CASE__ = self.get_rust_tokenizer()
        SCREAMING_SNAKE_CASE__ = tokenizer.encode(_lowercase )
        SCREAMING_SNAKE_CASE__ = rust_tokenizer.encode(_lowercase )
        self.assertListEqual(_lowercase , _lowercase )

    @slow
    def __a ( self : List[Any] ):
        """Short sequence against reference fairseq ids."""
        SCREAMING_SNAKE_CASE__ = """Hello World!"""
        SCREAMING_SNAKE_CASE__ = [0, 3_53_78, 66_61, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(_lowercase , self.big_tokenizer.encode(_lowercase ) )

    @slow
    def __a ( self : Tuple ):
        """Long noisy sequence (incl. <unk> cases) against reference fairseq ids."""
        SCREAMING_SNAKE_CASE__ = (
            """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
            """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
        )
        SCREAMING_SNAKE_CASE__ = [
            0, 32_93, 83, 10, 45_52, 49_89, 79_86, 6_78, 10, 59_15, 1_11, 17_94_59, 12_48_50, 4, 60_44, 2_37, 12, 6, 5, 6, 4, 67_80, 7_05, 15, 13_88, 44, 3_78, 1_01_14, 7_11, 1_52, 20, 6, 5, 2_23_76, 6_42, 12_21, 1_51_90, 3_41_53, 4_50, 56_08, 9_59, 11_19, 5_77_02, 1_36, 1_86, 47, 10_98, 2_93_67, 47,
            # 4426,  # What fairseq tokenizes from "<unk>": "_<"
            # 3678,  # What fairseq tokenizes from "<unk>": "unk"
            # 2740,  # What fairseq tokenizes from "<unk>": ">"
            3,  # What we tokenize from "<unk>": "<unk>"
            6,  # Residue from the tokenization: an extra sentencepiece underline
            4, 60_44, 2_37, 62_84, 5_09_01, 5_28, 31, 90, 34, 9_27, 2,
        ]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(_lowercase , self.big_tokenizer.encode(_lowercase ) )

    @slow
    def __a ( self : Union[str, Any] ):
        """Integration check of a pinned batch encoding for xlm-roberta-base."""
        SCREAMING_SNAKE_CASE__ = {"""input_ids""": [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=_lowercase , model_name="""xlm-roberta-base""" , revision="""d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3""" , )
219
import socket


def main() -> None:
    """Receive a file from a TCP server running on this host.

    Connects to ``<hostname>:12312``, sends a greeting, then streams the
    server's reply into ``Received_file`` until the peer closes the
    connection.

    NOTE: the ``__main__`` guard below calls ``main()``, so the entry
    point must carry that name (the mangled source defined
    ``__SCREAMING_SNAKE_CASE`` but called ``main`` — a NameError).
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # IPv4 / TCP
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:  # empty read => server closed the connection
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
219
1
"""simple docstring""" def snake_case (A_ :Union[str, Any] ): '''simple docstring''' if not head: return True # split the list to two parts a, a : Dict = head.next, head while fast and fast.next: a : str = fast.next.next a : str = slow.next a : Optional[Any] = slow.next a : List[Any] = None # Don't forget here! But forget still works! # reverse the second part a : Dict = None while second: a : int = second.next a : int = node a : List[Any] = second a : Any = nxt # compare two parts # second part has the same or one less node while node: if node.val != head.val: return False a : str = node.next a : str = head.next return True def snake_case (A_ :List[str] ): '''simple docstring''' if not head or not head.next: return True # 1. Get the midpoint (slow) a : Any = head while fast and fast.next: a, a : Dict = fast.next.next, slow.next # 2. Push the second half into the stack a : Union[str, Any] = [slow.val] while slow.next: a : str = slow.next stack.append(slow.val ) # 3. Comparison while stack: if stack.pop() != cur.val: return False a : List[str] = cur.next return True def snake_case (A_ :Any ): '''simple docstring''' if not head or not head.next: return True a : List[str] = {} a : List[Any] = 0 while head: if head.val in d: d[head.val].append(A_ ) else: a : Dict = [pos] a : Optional[Any] = head.next pos += 1 a : Dict = pos - 1 a : Union[str, Any] = 0 for v in d.values(): if len(A_ ) % 2 != 0: middle += 1 else: a : Any = 0 for i in range(0 , len(A_ ) ): if v[i] + v[len(A_ ) - 1 - step] != checksum: return False step += 1 if middle > 1: return False return True
186
"""simple docstring""" import argparse from collections import defaultdict import yaml _UpperCamelCase : int = 'docs/source/en/_toctree.yml' def snake_case (A_ :Optional[Any] ): '''simple docstring''' a : List[Any] = defaultdict(A_ ) for doc in model_doc: counts[doc["local"]] += 1 a : Optional[Any] = [key for key, value in counts.items() if value > 1] a : List[str] = [] for duplicate_key in duplicates: a : int = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key} ) if len(A_ ) > 1: raise ValueError( f'''{duplicate_key} is present several times in the documentation table of content at ''' '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the ' 'others.' ) # Only add this once new_doc.append({'local': duplicate_key, 'title': titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1] ) # Sort return sorted(A_ , key=lambda A_ : s["title"].lower() ) def snake_case (A_ :List[str]=False ): '''simple docstring''' with open(A_ , encoding='utf-8' ) as f: a : Dict = yaml.safe_load(f.read() ) # Get to the API doc a : Optional[Any] = 0 while content[api_idx]["title"] != "API": api_idx += 1 a : List[str] = content[api_idx]['sections'] # Then to the model doc a : Optional[int] = 0 while api_doc[model_idx]["title"] != "Models": model_idx += 1 a : Optional[Any] = api_doc[model_idx]['sections'] a : Dict = [(idx, section) for idx, section in enumerate(A_ ) if 'sections' in section] a : List[str] = False for idx, modality_doc in modalities_docs: a : str = modality_doc['sections'] a : str = clean_model_doc_toc(A_ ) if old_modality_doc != new_modality_doc: a : str = True if overwrite: a : Any = new_modality_doc if diff: if overwrite: a : Any = model_doc a : str = api_doc with open(A_ , 'w' , encoding='utf-8' ) as f: f.write(yaml.dump(A_ , allow_unicode=A_ ) ) else: raise ValueError( 'The model doc part of the table of content is not properly sorted, run `make style` to fix 
this.' ) if __name__ == "__main__": _UpperCamelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') _UpperCamelCase : Any = parser.parse_args() check_model_doc(args.fix_and_overwrite)
186
1
"""simple docstring""" import unittest from transformers import AlbertTokenizer, AlbertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin _UpperCamelCase : Tuple = get_tests_dir("fixtures/spiece.model") @require_sentencepiece @require_tokenizers class UpperCAmelCase_ ( _a , unittest.TestCase): lowerCamelCase__ : Dict = AlbertTokenizer lowerCamelCase__ : str = AlbertTokenizerFast lowerCamelCase__ : Any = True lowerCamelCase__ : Tuple = True lowerCamelCase__ : Dict = True def _UpperCAmelCase ( self ) -> Dict: super().setUp() # We have a SentencePiece fixture for testing lowercase__ : Any = AlbertTokenizer(a ) tokenizer.save_pretrained(self.tmpdirname ) def _UpperCAmelCase ( self , a ) -> Optional[Any]: lowercase__ : Dict = 'this is a test' lowercase__ : Dict = 'this is a test' return input_text, output_text def _UpperCAmelCase ( self ) -> Optional[int]: lowercase__ : Union[str, Any] = '<pad>' lowercase__ : Optional[Any] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(a ) , a ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(a ) , a ) def _UpperCAmelCase ( self ) -> List[str]: lowercase__ : Dict = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<pad>' ) self.assertEqual(vocab_keys[1] , '<unk>' ) self.assertEqual(vocab_keys[-1] , '▁eloquent' ) self.assertEqual(len(a ) , 3_0_0_0_0 ) def _UpperCAmelCase ( self ) -> str: self.assertEqual(self.get_tokenizer().vocab_size , 3_0_0_0_0 ) def _UpperCAmelCase ( self ) -> Optional[Any]: if not self.test_rust_tokenizer: return lowercase__ : Union[str, Any] = self.get_tokenizer() lowercase__ : Union[str, Any] = self.get_rust_tokenizer() lowercase__ : List[Any] = 'I was born in 92000, and this is falsé.' 
lowercase__ : Union[str, Any] = tokenizer.tokenize(a ) lowercase__ : List[str] = rust_tokenizer.tokenize(a ) self.assertListEqual(a , a ) lowercase__ : Optional[int] = tokenizer.encode(a , add_special_tokens=a ) lowercase__ : Dict = rust_tokenizer.encode(a , add_special_tokens=a ) self.assertListEqual(a , a ) lowercase__ : Optional[Any] = self.get_rust_tokenizer() lowercase__ : Union[str, Any] = tokenizer.encode(a ) lowercase__ : List[str] = rust_tokenizer.encode(a ) self.assertListEqual(a , a ) def _UpperCAmelCase ( self ) -> Any: lowercase__ : Union[str, Any] = AlbertTokenizer(a , keep_accents=a ) lowercase__ : Optional[int] = tokenizer.tokenize('This is a test' ) self.assertListEqual(a , ['▁this', '▁is', '▁a', '▁test'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , [4_8, 2_5, 2_1, 1_2_8_9] ) lowercase__ : List[Any] = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( a , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.'] ) lowercase__ : Union[str, Any] = tokenizer.convert_tokens_to_ids(a ) self.assertListEqual(a , [3_1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9] ) lowercase__ : List[Any] = tokenizer.convert_ids_to_tokens(a ) self.assertListEqual( a , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.'] , ) def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ : Optional[Any] = AlbertTokenizer(a ) lowercase__ : Any = tokenizer.encode('sequence builders' ) lowercase__ : Dict = tokenizer.encode('multi-sequence build' ) lowercase__ : str = tokenizer.build_inputs_with_special_tokens(a ) lowercase__ : Dict = tokenizer.build_inputs_with_special_tokens(a , a ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ] @slow def _UpperCAmelCase ( self ) 
-> Dict: # fmt: off lowercase__ : Tuple = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 2_1_9_7_0, 1_3, 5, 6_0_9_2, 1_6_7, 2_8, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 7_0_2_8, 1_2_0_5_1, 1_8, 1_7, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 3_5_1_5, 1_8_6_8_4, 8, 4_4_6_1, 6, 1_9_2_7, 2_9_7, 8, 1_2_0_6_0, 2_6_0_7, 1_8, 1_3, 5, 4_4_6_1, 1_5, 1_0_5_3_8, 3_8, 8, 1_3_5, 1_5, 8_2_2, 5_8, 1_5, 9_9_3, 1_0_3_6_3, 1_5, 1_4_6_0, 8_0_0_5, 4_4_6_1, 1_5, 9_9_3, 2_5_5, 2_3_2_8, 9, 9, 9, 6, 2_6, 1_1_1_2, 8_1_6, 3_2_6_0, 1_3, 5, 1_0_3, 2_3_7_7, 6, 1_7, 1_1_1_2, 8_1_6, 2_7_8_2, 1_3, 5, 1_0_3, 1_0_6_4_1, 6, 2_9, 8_4, 2_5_1_2, 2_4_3_0, 7_8_2, 1_8_6_8_4, 2_7_6_1, 1_9, 8_0_8, 2_4_3_0, 2_5_5_6, 1_7, 8_5_5, 1_4_8_0, 9_4_7_7, 4_0_9_1, 1_2_8, 1_1_7_1_2, 1_5, 7_1_0_3, 2_1_5_3, 6_7_3, 1_7, 2_4_8_8_3, 9_9_9_0, 9, 3], [2, 1_1_5_0_2, 2_5, 1_0_0_6, 2_0, 7_8_2, 8, 1_1_8_0_9, 8_5_5, 1_7_3_2, 1_9_3_9_3, 1_8_6_6_7, 3_7, 3_6_7, 2_1_0_1_8, 6_9, 1_8_5_4, 3_4, 1_1_8_6_0, 1_9_1_2_4, 2_7, 1_5_6, 2_2_5, 1_7, 1_9_3, 4_1_4_1, 1_9, 6_5, 9_1_2_4, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 1_4, 2_2_3_1, 8_8_6, 2_3_8_5, 1_7_6_5_9, 8_4, 1_4, 1_6_7_9_2, 1_9_5_2, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=a , model_name='albert-base-v2' , revision='6b6560eaf5ff2e250b00c50f380c5389a9c2d82e' , )
77
"""simple docstring""" from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from PIL import Image from ...utils import ( BaseOutput, OptionalDependencyNotAvailable, is_flax_available, is_k_diffusion_available, is_k_diffusion_version, is_onnx_available, is_torch_available, is_transformers_available, is_transformers_version, ) @dataclass class UpperCAmelCase_ ( _a): lowerCamelCase__ : Union[List[PIL.Image.Image], np.ndarray] lowerCamelCase__ : Optional[List[bool]] try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_cycle_diffusion import CycleDiffusionPipeline from .pipeline_stable_diffusion import StableDiffusionPipeline from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline from .pipeline_stable_unclip import StableUnCLIPPipeline from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline 
from .safety_checker import StableDiffusionSafetyChecker from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline else: from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.26.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( StableDiffusionDepthaImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionPixaPixZeroPipeline, ) else: from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline try: if not ( is_torch_available() and is_transformers_available() and is_k_diffusion_available() and is_k_diffusion_version(">=", "0.0.12") ): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline try: if not (is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_onnx_objects import * # noqa F403 else: from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline from .pipeline_onnx_stable_diffusion_inpaint import 
OnnxStableDiffusionInpaintPipeline from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline if is_transformers_available() and is_flax_available(): import flax @flax.struct.dataclass class UpperCAmelCase_ ( _a): lowerCamelCase__ : np.ndarray lowerCamelCase__ : List[bool] from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
77
1
"""simple docstring""" import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = None def __lowerCamelCase ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str]=0.999 , UpperCAmelCase_ : str="cosine" , ): """simple docstring""" if alpha_transform_type == "cosine": def alpha_bar_fn(UpperCAmelCase_ : List[str] ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(UpperCAmelCase_ : List[Any] ): return math.exp(t * -12.0 ) else: raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' ) a :Union[str, Any] = [] for i in range(UpperCAmelCase_ ): a :Optional[Any] = i / num_diffusion_timesteps a :int = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(UpperCAmelCase_ ) / alpha_bar_fn(UpperCAmelCase_ ) , UpperCAmelCase_ ) ) return torch.tensor(UpperCAmelCase_ , dtype=torch.floataa ) class _snake_case ( _snake_case , _snake_case ): @register_to_config def __init__( self , _lowerCamelCase = 1000 , _lowerCamelCase = "fixed_small_log" , _lowerCamelCase = True , _lowerCamelCase = 1.0 , _lowerCamelCase = "epsilon" , _lowerCamelCase = "squaredcos_cap_v2" , ): if beta_schedule != "squaredcos_cap_v2": raise ValueError('''UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'''' ) a :List[Any] = betas_for_alpha_bar(_lowerCamelCase ) a :Any = 1.0 - self.betas a :int = torch.cumprod(self.alphas , dim=0 ) a :Union[str, Any] = torch.tensor(1.0 ) # standard deviation of the initial noise distribution a :List[Any] = 1.0 # setable values a :Optional[Any] = None a 
:List[str] = torch.from_numpy(np.arange(0 , _lowerCamelCase )[::-1].copy() ) a :List[str] = variance_type def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None ): return sample def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None ): a :Optional[int] = num_inference_steps a :Union[str, Any] = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) a :int = (np.arange(0 , _lowerCamelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa ) a :List[str] = torch.from_numpy(_lowerCamelCase ).to(_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None ): if prev_timestep is None: a :Union[str, Any] = t - 1 a :Dict = self.alphas_cumprod[t] a :str = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one a :Optional[Any] = 1 - alpha_prod_t a :Tuple = 1 - alpha_prod_t_prev if prev_timestep == t - 1: a :int = self.betas[t] else: a :Tuple = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample a :str = beta_prod_t_prev / beta_prod_t * beta if variance_type is None: a :Optional[int] = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": a :Dict = torch.log(torch.clamp(_lowerCamelCase , min=1e-20 ) ) a :Optional[Any] = torch.exp(0.5 * variance ) elif variance_type == "learned_range": # NOTE difference with DDPM scheduler a :List[Any] = variance.log() a :Any = beta.log() a :List[Any] = (predicted_variance + 1) / 2 a :Dict = frac * max_log + (1 - frac) * min_log return variance def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase=None , _lowerCamelCase = True , ): a 
:Optional[int] = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": a :Optional[int] = torch.split(_lowerCamelCase , sample.shape[1] , dim=1 ) else: a :int = None # 1. compute alphas, betas if prev_timestep is None: a :Any = t - 1 a :Tuple = self.alphas_cumprod[t] a :Optional[int] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one a :Tuple = 1 - alpha_prod_t a :List[str] = 1 - alpha_prod_t_prev if prev_timestep == t - 1: a :Union[str, Any] = self.betas[t] a :Optional[Any] = self.alphas[t] else: a :Dict = 1 - alpha_prod_t / alpha_prod_t_prev a :Optional[Any] = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": a :List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": a :List[str] = model_output else: raise ValueError( F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`''' ''' for the UnCLIPScheduler.''' ) # 3. Clip "predicted x_0" if self.config.clip_sample: a :List[str] = torch.clamp( _lowerCamelCase , -self.config.clip_sample_range , self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf a :List[str] = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t a :Any = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf a :Optional[int] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise a :Dict = 0 if t > 0: a :Optional[int] = randn_tensor( model_output.shape , dtype=model_output.dtype , generator=_lowerCamelCase , device=model_output.device ) a :Dict = self._get_variance( _lowerCamelCase , predicted_variance=_lowerCamelCase , prev_timestep=_lowerCamelCase , ) if self.variance_type == "fixed_small_log": a :Optional[int] = variance elif self.variance_type == "learned_range": a :Union[str, Any] = (0.5 * variance).exp() else: raise ValueError( F'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`''' ''' for the UnCLIPScheduler.''' ) a :Optional[int] = variance * variance_noise a :List[str] = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=_lowerCamelCase , pred_original_sample=_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ): # Make sure alphas_cumprod and timestep have same device and dtype as original_samples a :List[Any] = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype ) a :Optional[Any] = timesteps.to(original_samples.device ) a :Tuple = alphas_cumprod[timesteps] ** 0.5 a :List[Any] = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): a :Any = sqrt_alpha_prod.unsqueeze(-1 ) a :List[str] = (1 - alphas_cumprod[timesteps]) ** 0.5 a :Optional[int] = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): a :List[Any] = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) a :str = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
362
"""Tests for the CycleDiffusion pipeline.

NOTE(review): reconstructed from a whitespace-mangled dump. The dump had
collapsed all physical lines and an obfuscation pass had (a) renamed every
local to ``a`` while later uses kept the original identifiers (NameError at
runtime), (b) given every method the same placeholder name so unittest could
discover nothing, and (c) digit-mangled ``UNet2DConditionModel``. Names are
restored so definitions and uses agree; all runtime strings and numeric
constants are kept exactly as they appeared in the dump.
"""
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin

enable_full_determinism()


class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests running CycleDiffusion with tiny random sub-models."""

    pipeline_class = CycleDiffusionPipeline
    # CycleDiffusion takes no negative prompt and infers height/width from the image.
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build tiny randomly-initialized sub-models so the pipeline runs in seconds on CPU."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            num_train_timesteps=1000,
            # NOTE(review): the dump had lost these values to the obfuscation;
            # False/False matches the upstream diffusers test — confirm.
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Return deterministic call kwargs for the pipeline (seeded by `seed`)."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5  # map from [-1, 1] into the [0, 1] image range
        if str(device).startswith("mps"):
            # mps does not support device-bound generators
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()


@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU tests against the real CompVis/stable-diffusion-v1-4 checkpoint."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)

        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1

    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)

        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
281
0
"""simple docstring""" import argparse import collections import os import re import tempfile import pandas as pd from datasets import Dataset from huggingface_hub import hf_hub_download, upload_folder from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/update_metadata.py _UpperCAmelCase = """src/transformers""" # This is to make sure the transformers module imported is the one in the repo. _UpperCAmelCase = direct_transformers_import(TRANSFORMERS_PATH) # Regexes that match TF/Flax/PT model names. _UpperCAmelCase = re.compile(r"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""") _UpperCAmelCase = re.compile(r"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""") # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. _UpperCAmelCase = re.compile(r"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""") # Fill this with tuples (pipeline_tag, model_mapping, auto_model) _UpperCAmelCase = [ ("""pretraining""", """MODEL_FOR_PRETRAINING_MAPPING_NAMES""", """AutoModelForPreTraining"""), ("""feature-extraction""", """MODEL_MAPPING_NAMES""", """AutoModel"""), ("""audio-classification""", """MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForAudioClassification"""), ("""text-generation""", """MODEL_FOR_CAUSAL_LM_MAPPING_NAMES""", """AutoModelForCausalLM"""), ("""automatic-speech-recognition""", """MODEL_FOR_CTC_MAPPING_NAMES""", """AutoModelForCTC"""), ("""image-classification""", """MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForImageClassification"""), ("""image-segmentation""", """MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES""", """AutoModelForImageSegmentation"""), ("""fill-mask""", """MODEL_FOR_MASKED_LM_MAPPING_NAMES""", """AutoModelForMaskedLM"""), ("""object-detection""", """MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES""", 
"""AutoModelForObjectDetection"""), ( """zero-shot-object-detection""", """MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES""", """AutoModelForZeroShotObjectDetection""", ), ("""question-answering""", """MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES""", """AutoModelForQuestionAnswering"""), ("""text2text-generation""", """MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES""", """AutoModelForSeq2SeqLM"""), ("""text-classification""", """MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForSequenceClassification"""), ("""automatic-speech-recognition""", """MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES""", """AutoModelForSpeechSeq2Seq"""), ( """table-question-answering""", """MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES""", """AutoModelForTableQuestionAnswering""", ), ("""token-classification""", """MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForTokenClassification"""), ("""multiple-choice""", """MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES""", """AutoModelForMultipleChoice"""), ( """next-sentence-prediction""", """MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES""", """AutoModelForNextSentencePrediction""", ), ( """audio-frame-classification""", """MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForAudioFrameClassification""", ), ("""audio-xvector""", """MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES""", """AutoModelForAudioXVector"""), ( """document-question-answering""", """MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES""", """AutoModelForDocumentQuestionAnswering""", ), ( """visual-question-answering""", """MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES""", """AutoModelForVisualQuestionAnswering""", ), ("""image-to-text""", """MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES""", """AutoModelForVision2Seq"""), ( """zero-shot-image-classification""", """MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForZeroShotImageClassification""", ), ("""depth-estimation""", """MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES""", 
"""AutoModelForDepthEstimation"""), ("""video-classification""", """MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForVideoClassification"""), ("""mask-generation""", """MODEL_FOR_MASK_GENERATION_MAPPING_NAMES""", """AutoModelForMaskGeneration"""), ] def __magic_name__ ( lowercase ): SCREAMING_SNAKE_CASE_: int =re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , lowercase ) return [m.group(0 ) for m in matches] def __magic_name__ ( ): SCREAMING_SNAKE_CASE_: List[str] =transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES SCREAMING_SNAKE_CASE_: Optional[int] ={ config.replace("""Config""" , """""" ): model_type for model_type, config in config_maping_names.items() } # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax. SCREAMING_SNAKE_CASE_: str =collections.defaultdict(lowercase ) SCREAMING_SNAKE_CASE_: int =collections.defaultdict(lowercase ) SCREAMING_SNAKE_CASE_: List[Any] =collections.defaultdict(lowercase ) # Let's lookup through all transformers object (once) and find if models are supported by a given backend. 
for attr_name in dir(lowercase ): SCREAMING_SNAKE_CASE_: Optional[int] =None if _re_tf_models.match(lowercase ) is not None: SCREAMING_SNAKE_CASE_: Dict =tf_models SCREAMING_SNAKE_CASE_: str =_re_tf_models.match(lowercase ).groups()[0] elif _re_flax_models.match(lowercase ) is not None: SCREAMING_SNAKE_CASE_: Tuple =flax_models SCREAMING_SNAKE_CASE_: List[Any] =_re_flax_models.match(lowercase ).groups()[0] elif _re_pt_models.match(lowercase ) is not None: SCREAMING_SNAKE_CASE_: Any =pt_models SCREAMING_SNAKE_CASE_: int =_re_pt_models.match(lowercase ).groups()[0] if lookup_dict is not None: while len(lowercase ) > 0: if attr_name in model_prefix_to_model_type: SCREAMING_SNAKE_CASE_: Union[str, Any] =True break # Try again after removing the last word in the name SCREAMING_SNAKE_CASE_: Any ="""""".join(camel_case_split(lowercase )[:-1] ) SCREAMING_SNAKE_CASE_: Optional[Any] =set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) ) SCREAMING_SNAKE_CASE_: Optional[Any] =list(lowercase ) all_models.sort() SCREAMING_SNAKE_CASE_: str ={"""model_type""": all_models} SCREAMING_SNAKE_CASE_: str =[pt_models[t] for t in all_models] SCREAMING_SNAKE_CASE_: Union[str, Any] =[tf_models[t] for t in all_models] SCREAMING_SNAKE_CASE_: List[str] =[flax_models[t] for t in all_models] # Now let's use the auto-mapping names to make sure SCREAMING_SNAKE_CASE_: str ={} for t in all_models: if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES: SCREAMING_SNAKE_CASE_: Optional[Any] ="""AutoProcessor""" elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES: SCREAMING_SNAKE_CASE_: Tuple ="""AutoTokenizer""" elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES: SCREAMING_SNAKE_CASE_: Optional[int] ="""AutoFeatureExtractor""" else: # Default to AutoTokenizer if a model has nothing, for backward compatibility. 
SCREAMING_SNAKE_CASE_: Tuple ="""AutoTokenizer""" SCREAMING_SNAKE_CASE_: Union[str, Any] =[processors[t] for t in all_models] return pd.DataFrame(lowercase ) def __magic_name__ ( lowercase ): SCREAMING_SNAKE_CASE_: List[str] =[ transformers_module.models.auto.modeling_auto, transformers_module.models.auto.modeling_tf_auto, transformers_module.models.auto.modeling_flax_auto, ] for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS: SCREAMING_SNAKE_CASE_: Optional[int] =[model_mapping, f'''TF_{model_mapping}''', f'''FLAX_{model_mapping}'''] SCREAMING_SNAKE_CASE_: Any =[auto_class, f'''TF_{auto_class}''', f'''Flax_{auto_class}'''] # Loop through all three frameworks for module, cls, mapping in zip(lowercase , lowercase , lowercase ): # The type of pipeline may not exist in this framework if not hasattr(lowercase , lowercase ): continue # First extract all model_names SCREAMING_SNAKE_CASE_: List[str] =[] for name in getattr(lowercase , lowercase ).values(): if isinstance(lowercase , lowercase ): model_names.append(lowercase ) else: model_names.extend(list(lowercase ) ) # Add pipeline tag and auto model class for those models table.update({model_name: (pipeline_tag, cls) for model_name in model_names} ) return table def __magic_name__ ( lowercase , lowercase ): SCREAMING_SNAKE_CASE_: int =get_frameworks_table() SCREAMING_SNAKE_CASE_: Tuple =Dataset.from_pandas(lowercase ) SCREAMING_SNAKE_CASE_: Any =hf_hub_download( """huggingface/transformers-metadata""" , """pipeline_tags.json""" , repo_type="""dataset""" , token=lowercase ) SCREAMING_SNAKE_CASE_: Optional[Any] =Dataset.from_json(lowercase ) SCREAMING_SNAKE_CASE_: int ={ tags_dataset[i]["""model_class"""]: (tags_dataset[i]["""pipeline_tag"""], tags_dataset[i]["""auto_class"""]) for i in range(len(lowercase ) ) } SCREAMING_SNAKE_CASE_: Union[str, Any] =update_pipeline_and_auto_class_table(lowercase ) # Sort the model classes to avoid some nondeterministic updates to create false update commits. 
SCREAMING_SNAKE_CASE_: str =sorted(table.keys() ) SCREAMING_SNAKE_CASE_: Union[str, Any] =pd.DataFrame( { """model_class""": model_classes, """pipeline_tag""": [table[m][0] for m in model_classes], """auto_class""": [table[m][1] for m in model_classes], } ) SCREAMING_SNAKE_CASE_: Optional[Any] =Dataset.from_pandas(lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: frameworks_dataset.to_json(os.path.join(lowercase , """frameworks.json""" ) ) tags_dataset.to_json(os.path.join(lowercase , """pipeline_tags.json""" ) ) if commit_sha is not None: SCREAMING_SNAKE_CASE_: Optional[int] =( f'''Update with commit {commit_sha}\n\nSee: ''' f'''https://github.com/huggingface/transformers/commit/{commit_sha}''' ) else: SCREAMING_SNAKE_CASE_: List[Any] ="""Update""" upload_folder( repo_id="""huggingface/transformers-metadata""" , folder_path=lowercase , repo_type="""dataset""" , token=lowercase , commit_message=lowercase , ) def __magic_name__ ( ): SCREAMING_SNAKE_CASE_: List[Any] ={tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS} SCREAMING_SNAKE_CASE_: List[str] =transformers_module.pipelines.SUPPORTED_TASKS SCREAMING_SNAKE_CASE_: Optional[Any] =[] for key in pipeline_tasks: if key not in in_table: SCREAMING_SNAKE_CASE_: Dict =pipeline_tasks[key]["""pt"""] if isinstance(lowercase , (list, tuple) ): SCREAMING_SNAKE_CASE_: Tuple =model[0] SCREAMING_SNAKE_CASE_: Tuple =model.__name__ if model not in in_table.values(): missing.append(lowercase ) if len(lowercase ) > 0: SCREAMING_SNAKE_CASE_: Optional[Any] =""", """.join(lowercase ) raise ValueError( """The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside """ f'''`utils/update_metadata.py`: {msg}. 
Please add them!''' ) if __name__ == "__main__": _UpperCAmelCase = argparse.ArgumentParser() parser.add_argument("""--token""", type=str, help="""The token to use to push to the transformers-metadata dataset.""") parser.add_argument("""--commit_sha""", type=str, help="""The sha of the commit going with this update.""") parser.add_argument("""--check-only""", action="""store_true""", help="""Activate to just check all pipelines are present.""") _UpperCAmelCase = parser.parse_args() if args.check_only: check_pipeline_tags() else: update_metadata(args.token, args.commit_sha)
173
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class a ( UpperCAmelCase__ , unittest.TestCase ): UpperCamelCase : int = KandinskyInpaintPipeline UpperCamelCase : Optional[Any] = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image'] UpperCamelCase : int = [ 'prompt', 'negative_prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image', ] UpperCamelCase : Any = [ 'generator', 'height', 'width', 'latents', 'guidance_scale', 'negative_prompt', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] UpperCamelCase : Tuple = False @property def lowerCamelCase__ ( self : Union[str, Any] ) -> str: '''simple docstring''' return 32 @property def lowerCamelCase__ ( self : Optional[Any] ) -> Tuple: '''simple docstring''' return 32 @property def lowerCamelCase__ ( self : List[Any] ) -> int: '''simple docstring''' return self.time_input_dim @property def lowerCamelCase__ ( self : Optional[Any] ) -> int: '''simple docstring''' return self.time_input_dim * 4 @property def lowerCamelCase__ ( self : Dict ) -> List[Any]: '''simple docstring''' return 100 @property def lowerCamelCase__ ( self : str ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE_: int =XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" ) return tokenizer @property def lowerCamelCase__ ( self : Dict ) -> 
Dict: '''simple docstring''' torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_: Optional[Any] =MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , ) SCREAMING_SNAKE_CASE_: List[str] =MultilingualCLIP(lowerCAmelCase ) SCREAMING_SNAKE_CASE_: str =text_encoder.eval() return text_encoder @property def lowerCamelCase__ ( self : Optional[int] ) -> Dict: '''simple docstring''' torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_: Optional[Any] ={ """in_channels""": 9, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """text_image""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """text_image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } SCREAMING_SNAKE_CASE_: str =UNetaDConditionModel(**lowerCAmelCase ) return model @property def lowerCamelCase__ ( self : Any ) -> Tuple: '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def lowerCamelCase__ ( self : List[Any] ) -> List[Any]: '''simple docstring''' 
torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_: List[str] =VQModel(**self.dummy_movq_kwargs ) return model def lowerCamelCase__ ( self : Optional[Any] ) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE_: Tuple =self.dummy_text_encoder SCREAMING_SNAKE_CASE_: Optional[Any] =self.dummy_tokenizer SCREAMING_SNAKE_CASE_: List[str] =self.dummy_unet SCREAMING_SNAKE_CASE_: Union[str, Any] =self.dummy_movq SCREAMING_SNAKE_CASE_: int =DDIMScheduler( num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , steps_offset=1 , prediction_type="""epsilon""" , thresholding=lowerCAmelCase , ) SCREAMING_SNAKE_CASE_: str ={ """text_encoder""": text_encoder, """tokenizer""": tokenizer, """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : List[str] , lowerCAmelCase : List[str]=0 ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE_: int =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase ) SCREAMING_SNAKE_CASE_: List[Any] =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(lowerCAmelCase ) # create init_image SCREAMING_SNAKE_CASE_: List[Any] =floats_tensor((1, 3, 64, 64) , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase ) SCREAMING_SNAKE_CASE_: List[str] =image.cpu().permute(0 , 2 , 3 , 1 )[0] SCREAMING_SNAKE_CASE_: List[str] =Image.fromarray(np.uinta(lowerCAmelCase ) ).convert("""RGB""" ).resize((256, 256) ) # create mask SCREAMING_SNAKE_CASE_: Dict =np.ones((64, 64) , dtype=np.floataa ) SCREAMING_SNAKE_CASE_: Optional[Any] =0 if str(lowerCAmelCase ).startswith("""mps""" ): SCREAMING_SNAKE_CASE_: Optional[int] =torch.manual_seed(lowerCAmelCase ) else: SCREAMING_SNAKE_CASE_: List[Any] =torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase ) SCREAMING_SNAKE_CASE_: Tuple ={ 
"""prompt""": """horse""", """image""": init_image, """mask_image""": mask, """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """generator""": generator, """height""": 64, """width""": 64, """num_inference_steps""": 2, """guidance_scale""": 4.0, """output_type""": """np""", } return inputs def lowerCamelCase__ ( self : List[str] ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE_: Dict ="""cpu""" SCREAMING_SNAKE_CASE_: List[Any] =self.get_dummy_components() SCREAMING_SNAKE_CASE_: Optional[int] =self.pipeline_class(**lowerCAmelCase ) SCREAMING_SNAKE_CASE_: Union[str, Any] =pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) SCREAMING_SNAKE_CASE_: List[str] =pipe(**self.get_dummy_inputs(lowerCAmelCase ) ) SCREAMING_SNAKE_CASE_: int =output.images SCREAMING_SNAKE_CASE_: Optional[int] =pipe( **self.get_dummy_inputs(lowerCAmelCase ) , return_dict=lowerCAmelCase , )[0] SCREAMING_SNAKE_CASE_: Tuple =image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE_: Optional[int] =image_from_tuple[0, -3:, -3:, -1] print(f'''image.shape {image.shape}''' ) assert image.shape == (1, 64, 64, 3) SCREAMING_SNAKE_CASE_: List[Any] =np.array( [0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' def lowerCamelCase__ ( self : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class a ( unittest.TestCase ): def lowerCamelCase__ ( self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' 
super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase__ ( self : List[Any] ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE_: Any =load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" ) SCREAMING_SNAKE_CASE_: str =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" ) SCREAMING_SNAKE_CASE_: List[str] =np.ones((768, 768) , dtype=np.floataa ) SCREAMING_SNAKE_CASE_: List[str] =0 SCREAMING_SNAKE_CASE_: Union[str, Any] ="""a hat""" SCREAMING_SNAKE_CASE_: str =KandinskyPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa ) pipe_prior.to(lowerCAmelCase ) SCREAMING_SNAKE_CASE_: Optional[Any] =KandinskyInpaintPipeline.from_pretrained( """kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE_: List[str] =pipeline.to(lowerCAmelCase ) pipeline.set_progress_bar_config(disable=lowerCAmelCase ) SCREAMING_SNAKE_CASE_: List[str] =torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] =pipe_prior( lowerCAmelCase , generator=lowerCAmelCase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple() SCREAMING_SNAKE_CASE_: List[Any] =pipeline( lowerCAmelCase , image=lowerCAmelCase , mask_image=lowerCAmelCase , image_embeds=lowerCAmelCase , negative_image_embeds=lowerCAmelCase , generator=lowerCAmelCase , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , ) SCREAMING_SNAKE_CASE_: int =output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowerCAmelCase , lowerCAmelCase )
173
1
"""PyTorch RegNet model.

Restores the real identifiers this file references: the classes were all named
``a__`` (so ``RegNetConvLayer`` etc. raised NameError), forward methods were
named ``lowercase_`` (so ``nn.Module.__call__`` could never dispatch), and the
torch layer names were mangled (``nn.Convad`` -> ``nn.Conv2d``,
``nn.BatchNormad`` -> ``nn.BatchNorm2d``, ``ACTaFN`` -> ``ACT2FN``).
"""
from typing import Optional

import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
    BaseModelOutputWithNoAttention,
    BaseModelOutputWithPoolingAndNoAttention,
    ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]


class RegNetConvLayer(nn.Module):
    """Convolution + batch norm + optional activation: the basic RegNet building block."""

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ):
        super().__init__()
        # padding = kernel_size // 2 gives "same" spatial size for odd kernels
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=groups,
            bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetEmbeddings(nn.Module):
    """RegNet "stem": a single strided 3x3 convolution that embeds the pixel values."""

    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class RegNetShortCut(nn.Module):
    """
    RegNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
    downsample the input using `stride=2`.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class RegNetSELayer(nn.Module):
    """Squeeze-and-Excitation layer: re-weights channels by a learned global attention."""

    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        # b c h w -> b c 1 1
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state


class RegNetXLayer(nn.Module):
    """RegNet's X layer: a ResNet-style bottleneck with grouped 3x3 convolution."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            # last projection has no activation; the residual sum is activated below
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetYLayer(nn.Module):
    """RegNet's Y layer: an X layer with Squeeze-and-Excitation."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetStage(nn.Module):
    """A RegNet stage composed of stacked X or Y layers."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state


class RegNetEncoder(nn.Module):
    """Runs the embedded input through all stages, optionally collecting hidden states."""

    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)


class RegNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        # Kaiming init for convolutions, constant init for norm layers
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        # NOTE(review): the original isinstance target was mangled; RegNetModel
        # is the conventional target for this hook — confirm against upstream.
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value


REGNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )


@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                # infer the problem type from num_labels / label dtype, then cache it on the config
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
96
"""Unit tests for the CTRL tokenizer.

Restores the unittest naming contract: with the mangled names (`class a__`,
methods all called `lowercase_`, undefined base `lowerCamelCase__`) the file
failed at import and no test was ever discovered or run. Fixture paths are
stored on `self` because `test_full_tokenizer` reads `self.vocab_file` /
`self.merges_file` back.
"""
import json
import os
import unittest

from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False  # CTRL has no fast (Rust) tokenizer
    test_seq2seq = False

    def setUp(self):
        """Write a tiny BPE vocab/merges fixture into the mixin's tmp dir."""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        """Build a tokenizer from the fixture, honoring the special tokens map."""
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        # For CTRL, detokenizing the fixture text round-trips unchanged.
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
96
1
"""simple docstring""" from __future__ import annotations __magic_name__ = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0] __magic_name__ = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1] def _lowerCAmelCase ( UpperCamelCase_ ): __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = len(UpperCamelCase_ ) for i in range(UpperCamelCase_ ): __SCREAMING_SNAKE_CASE = -1 for j in range(i + 1 , UpperCamelCase_ ): if arr[i] < arr[j]: __SCREAMING_SNAKE_CASE = arr[j] break result.append(UpperCamelCase_ ) return result def _lowerCAmelCase ( UpperCamelCase_ ): __SCREAMING_SNAKE_CASE = [] for i, outer in enumerate(UpperCamelCase_ ): __SCREAMING_SNAKE_CASE = -1 for inner in arr[i + 1 :]: if outer < inner: __SCREAMING_SNAKE_CASE = inner break result.append(UpperCamelCase_ ) return result def _lowerCAmelCase ( UpperCamelCase_ ): __SCREAMING_SNAKE_CASE = len(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [-1] * arr_size for index in reversed(range(UpperCamelCase_ ) ): if stack: while stack[-1] <= arr[index]: stack.pop() if not stack: break if stack: __SCREAMING_SNAKE_CASE = stack[-1] stack.append(arr[index] ) return result if __name__ == "__main__": from doctest import testmod from timeit import timeit testmod() print(next_greatest_element_slow(arr)) print(next_greatest_element_fast(arr)) print(next_greatest_element(arr)) __magic_name__ = ( "from __main__ import arr, next_greatest_element_slow, " "next_greatest_element_fast, next_greatest_element" ) print( "next_greatest_element_slow():", timeit("next_greatest_element_slow(arr)", setup=setup), ) print( "next_greatest_element_fast():", timeit("next_greatest_element_fast(arr)", setup=setup), ) print( " next_greatest_element():", timeit("next_greatest_element(arr)", setup=setup), )
100
"""Tests for StableDiffusionDiffEditPipeline (mask generation, inversion, editing).

Restores the identifiers the mangled file broke: mixin bases were the
undefined name ``a``, every test method was named ``__lowercase`` (so
unittest never discovered any test), ``UNetaDConditionModel`` /
``torch.floataa`` / ``np.uinta`` do not exist, and the integration tests
assigned schedulers and ``raw_image`` to throwaway locals instead of
``pipe.scheduler`` / ``pipe.inverse_scheduler`` / ``cls.raw_image``.
All literal values are kept exactly as in the original.
"""
import gc
import random
import tempfile
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMInverseScheduler,
    DDIMScheduler,
    DPMSolverMultistepInverseScheduler,
    DPMSolverMultistepScheduler,
    StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_params = frozenset([])
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        """Build tiny UNet/VAE/CLIP components so the pipeline runs on CPU in seconds."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_zero=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Inputs for the editing call itself (mask + precomputed latents)."""
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            # mps does not support device-bound generators
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_mask_inputs(self, device, seed=0):
        """Inputs for `generate_mask` (source/target prompt pair)."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_inversion_inputs(self, device, seed=0):
        """Inputs for `invert` (DDIM/DPM inversion of a small image)."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)

    def test_mask(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]

        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)

    def test_inversion(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)

    def test_inversion_dpm(self):
        device = "cpu"
        components = self.get_dummy_components()

        # swap in multistep DPM solvers for both directions
        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)

        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)


@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        # downloaded once and shared by all tests in this class
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
        )
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image

    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"
        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator
        )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator
        ).latents
        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1

    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"
        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator
        )
        inv_latents = pipe.invert(
            prompt=source_prompt,
            image=self.raw_image,
            inpaint_strength=0.7,
            generator=generator,
            num_inference_steps=25,
        ).latents
        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            num_inference_steps=25,
            output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
71
0
"""Detect whether a singly linked list contains a loop (cycle)."""
from __future__ import annotations

from typing import Any


class ContainsLoopError(Exception):
    """Raised while iterating a linked list that revisits a node."""


class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next_node: Node | None = None

    def __iter__(self):
        """Yield each node's data; raise ContainsLoopError if a node repeats."""
        node: Node | None = self
        # Node has no custom __eq__/__hash__, so set membership is identity-based
        # and each check is O(1) instead of the O(n) list scan.
        visited: set[Node] = set()
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.add(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        """True if following next_node pointers ever revisits a node."""
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
221
import argparse import os.path as osp import re import torch from safetensors.torch import load_file, save_file # =================# # UNet Conversion # # =================# UpperCamelCase = [ # (stable-diffusion, HF Diffusers) ('time_embed.0.weight', 'time_embedding.linear_1.weight'), ('time_embed.0.bias', 'time_embedding.linear_1.bias'), ('time_embed.2.weight', 'time_embedding.linear_2.weight'), ('time_embed.2.bias', 'time_embedding.linear_2.bias'), ('input_blocks.0.0.weight', 'conv_in.weight'), ('input_blocks.0.0.bias', 'conv_in.bias'), ('out.0.weight', 'conv_norm_out.weight'), ('out.0.bias', 'conv_norm_out.bias'), ('out.2.weight', 'conv_out.weight'), ('out.2.bias', 'conv_out.bias'), ] UpperCamelCase = [ # (stable-diffusion, HF Diffusers) ('in_layers.0', 'norm1'), ('in_layers.2', 'conv1'), ('out_layers.0', 'norm2'), ('out_layers.3', 'conv2'), ('emb_layers.1', 'time_emb_proj'), ('skip_connection', 'conv_shortcut'), ] UpperCamelCase = [] # hardcoded number of downblocks and resnets/attentions... # would need smarter logic for other networks. 
for i in range(4): # loop over downblocks/upblocks for j in range(2): # loop over resnets/attentions for downblocks UpperCamelCase = F"""down_blocks.{i}.resnets.{j}.""" UpperCamelCase = F"""input_blocks.{3*i + j + 1}.0.""" unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix)) if i < 3: # no attention layers in down_blocks.3 UpperCamelCase = F"""down_blocks.{i}.attentions.{j}.""" UpperCamelCase = F"""input_blocks.{3*i + j + 1}.1.""" unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix)) for j in range(3): # loop over resnets/attentions for upblocks UpperCamelCase = F"""up_blocks.{i}.resnets.{j}.""" UpperCamelCase = F"""output_blocks.{3*i + j}.0.""" unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix)) if i > 0: # no attention layers in up_blocks.0 UpperCamelCase = F"""up_blocks.{i}.attentions.{j}.""" UpperCamelCase = F"""output_blocks.{3*i + j}.1.""" unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix)) if i < 3: # no downsample in down_blocks.3 UpperCamelCase = F"""down_blocks.{i}.downsamplers.0.conv.""" UpperCamelCase = F"""input_blocks.{3*(i+1)}.0.op.""" unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix)) # no upsample in up_blocks.3 UpperCamelCase = F"""up_blocks.{i}.upsamplers.0.""" UpperCamelCase = F"""output_blocks.{3*i + 2}.{1 if i == 0 else 2}.""" unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix)) UpperCamelCase = 'mid_block.attentions.0.' UpperCamelCase = 'middle_block.1.' 
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix)) for j in range(2): UpperCamelCase = F"""mid_block.resnets.{j}.""" UpperCamelCase = F"""middle_block.{2*j}.""" unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix)) def _A ( lowerCAmelCase_ : Any ): """simple docstring""" lowerCAmelCase__ = {k: k for k in unet_state_dict.keys()} for sd_name, hf_name in unet_conversion_map: lowerCAmelCase__ = sd_name for k, v in mapping.items(): if "resnets" in k: for sd_part, hf_part in unet_conversion_map_resnet: lowerCAmelCase__ = v.replace(lowerCAmelCase_ , lowerCAmelCase_ ) lowerCAmelCase__ = v for k, v in mapping.items(): for sd_part, hf_part in unet_conversion_map_layer: lowerCAmelCase__ = v.replace(lowerCAmelCase_ , lowerCAmelCase_ ) lowerCAmelCase__ = v lowerCAmelCase__ = {v: unet_state_dict[k] for k, v in mapping.items()} return new_state_dict # ================# # VAE Conversion # # ================# UpperCamelCase = [ # (stable-diffusion, HF Diffusers) ('nin_shortcut', 'conv_shortcut'), ('norm_out', 'conv_norm_out'), ('mid.attn_1.', 'mid_block.attentions.0.'), ] for i in range(4): # down_blocks have two resnets for j in range(2): UpperCamelCase = F"""encoder.down_blocks.{i}.resnets.{j}.""" UpperCamelCase = F"""encoder.down.{i}.block.{j}.""" vae_conversion_map.append((sd_down_prefix, hf_down_prefix)) if i < 3: UpperCamelCase = F"""down_blocks.{i}.downsamplers.0.""" UpperCamelCase = F"""down.{i}.downsample.""" vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix)) UpperCamelCase = F"""up_blocks.{i}.upsamplers.0.""" UpperCamelCase = F"""up.{3-i}.upsample.""" vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix)) # up_blocks have three resnets # also, up blocks in hf are numbered in reverse from sd for j in range(3): UpperCamelCase = F"""decoder.up_blocks.{i}.resnets.{j}.""" UpperCamelCase = F"""decoder.up.{3-i}.block.{j}.""" vae_conversion_map.append((sd_up_prefix, hf_up_prefix)) # this part accounts for 
mid blocks in both the encoder and the decoder for i in range(2): UpperCamelCase = F"""mid_block.resnets.{i}.""" UpperCamelCase = F"""mid.block_{i+1}.""" vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix)) UpperCamelCase = [ # (stable-diffusion, HF Diffusers) ('norm.', 'group_norm.'), ('q.', 'query.'), ('k.', 'key.'), ('v.', 'value.'), ('proj_out.', 'proj_attn.'), ] def _A ( lowerCAmelCase_ : List[str] ): """simple docstring""" return w.reshape(*w.shape , 1 , 1 ) def _A ( lowerCAmelCase_ : Dict ): """simple docstring""" lowerCAmelCase__ = {k: k for k in vae_state_dict.keys()} for k, v in mapping.items(): for sd_part, hf_part in vae_conversion_map: lowerCAmelCase__ = v.replace(lowerCAmelCase_ , lowerCAmelCase_ ) lowerCAmelCase__ = v for k, v in mapping.items(): if "attentions" in k: for sd_part, hf_part in vae_conversion_map_attn: lowerCAmelCase__ = v.replace(lowerCAmelCase_ , lowerCAmelCase_ ) lowerCAmelCase__ = v lowerCAmelCase__ = {v: vae_state_dict[k] for k, v in mapping.items()} lowerCAmelCase__ = ["q", "k", "v", "proj_out"] for k, v in new_state_dict.items(): for weight_name in weights_to_convert: if F'mid.attn_1.{weight_name}.weight' in k: print(F'Reshaping {k} for SD format' ) lowerCAmelCase__ = reshape_weight_for_sd(lowerCAmelCase_ ) return new_state_dict # =========================# # Text Encoder Conversion # # =========================# UpperCamelCase = [ # (stable-diffusion, HF Diffusers) ('resblocks.', 'text_model.encoder.layers.'), ('ln_1', 'layer_norm1'), ('ln_2', 'layer_norm2'), ('.c_fc.', '.fc1.'), ('.c_proj.', '.fc2.'), ('.attn', '.self_attn'), ('ln_final.', 'transformer.text_model.final_layer_norm.'), ('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'), ('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'), ] UpperCamelCase = {re.escape(x[1]): x[0] for x in textenc_conversion_lst} UpperCamelCase = re.compile('|'.join(protected.keys())) # Ordering is from 
https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp UpperCamelCase = {'q': 0, 'k': 1, 'v': 2} def _A ( lowerCAmelCase_ : Dict ): """simple docstring""" lowerCAmelCase__ = {} lowerCAmelCase__ = {} lowerCAmelCase__ = {} for k, v in text_enc_dict.items(): if ( k.endswith(".self_attn.q_proj.weight" ) or k.endswith(".self_attn.k_proj.weight" ) or k.endswith(".self_attn.v_proj.weight" ) ): lowerCAmelCase__ = k[: -len(".q_proj.weight" )] lowerCAmelCase__ = k[-len("q_proj.weight" )] if k_pre not in capture_qkv_weight: lowerCAmelCase__ = [None, None, None] lowerCAmelCase__ = v continue if ( k.endswith(".self_attn.q_proj.bias" ) or k.endswith(".self_attn.k_proj.bias" ) or k.endswith(".self_attn.v_proj.bias" ) ): lowerCAmelCase__ = k[: -len(".q_proj.bias" )] lowerCAmelCase__ = k[-len("q_proj.bias" )] if k_pre not in capture_qkv_bias: lowerCAmelCase__ = [None, None, None] lowerCAmelCase__ = v continue lowerCAmelCase__ = textenc_pattern.sub(lambda lowerCAmelCase_ : protected[re.escape(m.group(0 ) )] , lowerCAmelCase_ ) lowerCAmelCase__ = v for k_pre, tensors in capture_qkv_weight.items(): if None in tensors: raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" ) lowerCAmelCase__ = textenc_pattern.sub(lambda lowerCAmelCase_ : protected[re.escape(m.group(0 ) )] , lowerCAmelCase_ ) lowerCAmelCase__ = torch.cat(lowerCAmelCase_ ) for k_pre, tensors in capture_qkv_bias.items(): if None in tensors: raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" ) lowerCAmelCase__ = textenc_pattern.sub(lambda lowerCAmelCase_ : protected[re.escape(m.group(0 ) )] , lowerCAmelCase_ ) lowerCAmelCase__ = torch.cat(lowerCAmelCase_ ) return new_state_dict def _A ( lowerCAmelCase_ : Any ): """simple docstring""" return text_enc_dict if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to 
convert.') parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument('--half', action='store_true', help='Save weights in half precision.') parser.add_argument( '--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.' ) UpperCamelCase = parser.parse_args() assert args.model_path is not None, "Must provide a model path!" assert args.checkpoint_path is not None, "Must provide a checkpoint path!" # Path for safetensors UpperCamelCase = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.safetensors') UpperCamelCase = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.safetensors') UpperCamelCase = osp.join(args.model_path, 'text_encoder', 'model.safetensors') # Load models from safetensors if it exists, if it doesn't pytorch if osp.exists(unet_path): UpperCamelCase = load_file(unet_path, device='cpu') else: UpperCamelCase = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.bin') UpperCamelCase = torch.load(unet_path, map_location='cpu') if osp.exists(vae_path): UpperCamelCase = load_file(vae_path, device='cpu') else: UpperCamelCase = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.bin') UpperCamelCase = torch.load(vae_path, map_location='cpu') if osp.exists(text_enc_path): UpperCamelCase = load_file(text_enc_path, device='cpu') else: UpperCamelCase = osp.join(args.model_path, 'text_encoder', 'pytorch_model.bin') UpperCamelCase = torch.load(text_enc_path, map_location='cpu') # Convert the UNet model UpperCamelCase = convert_unet_state_dict(unet_state_dict) UpperCamelCase = {'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()} # Convert the VAE model UpperCamelCase = convert_vae_state_dict(vae_state_dict) UpperCamelCase = {'first_stage_model.' 
+ k: v for k, v in vae_state_dict.items()} # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper UpperCamelCase = 'text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict if is_vaa_model: # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm UpperCamelCase = {'transformer.' + k: v for k, v in text_enc_dict.items()} UpperCamelCase = convert_text_enc_state_dict_vaa(text_enc_dict) UpperCamelCase = {'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()} else: UpperCamelCase = convert_text_enc_state_dict(text_enc_dict) UpperCamelCase = {'cond_stage_model.transformer.' + k: v for k, v in text_enc_dict.items()} # Put together new checkpoint UpperCamelCase = {**unet_state_dict, **vae_state_dict, **text_enc_dict} if args.half: UpperCamelCase = {k: v.half() for k, v in state_dict.items()} if args.use_safetensors: save_file(state_dict, args.checkpoint_path) else: UpperCamelCase = {'state_dict': state_dict} torch.save(state_dict, args.checkpoint_path)
221
1
'''simple docstring''' from typing import List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging __SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Union[str, Any] = { """huggingface/autoformer-tourism-monthly""": """https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json""", } class lowerCamelCase_ (snake_case__ ): '''simple docstring''' __UpperCamelCase: Dict = "autoformer" __UpperCamelCase: Union[str, Any] = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", "num_hidden_layers": "encoder_layers", } def __init__( self : Union[str, Any] , A : List[Any] = None , A : List[Any] = None , A : List[str] = "student_t" , A : Optional[int] = "nll" , A : List[Any] = 1 , A : Tuple = [1, 2, 3, 4, 5, 6, 7] , A : int = True , A : str = 0 , A : List[str] = 0 , A : Optional[Any] = 0 , A : List[str] = 0 , A : Optional[Any] = None , A : Any = None , A : Union[str, Any] = 64 , A : Optional[int] = 2 , A : Union[str, Any] = 2 , A : Union[str, Any] = 2 , A : Optional[int] = 2 , A : int = 32 , A : Tuple = 32 , A : int = "gelu" , A : Optional[int] = 0.1 , A : List[Any] = 0.1 , A : str = 0.1 , A : Tuple = 0.1 , A : str = 0.1 , A : int = 100 , A : str = 0.02 , A : int = True , A : Union[str, Any]=True , A : List[Any] = 10 , A : int = 25 , A : Any = 3 , **A : Optional[Any] , ): # time series specific configuration _UpperCAmelCase : Dict = prediction_length _UpperCAmelCase : Dict = context_length if context_length is not None else prediction_length _UpperCAmelCase : Any = distribution_output _UpperCAmelCase : Optional[int] = loss _UpperCAmelCase : str = input_size _UpperCAmelCase : Optional[int] = num_time_features _UpperCAmelCase : Optional[int] = lags_sequence _UpperCAmelCase : Any = scaling _UpperCAmelCase : Optional[Any] = num_dynamic_real_features _UpperCAmelCase : List[Any] = num_static_real_features _UpperCAmelCase : Any = 
num_static_categorical_features if cardinality is not None and num_static_categorical_features > 0: if len(_lowerCAmelCase ) != num_static_categorical_features: raise ValueError( "The cardinality should be a list of the same length as `num_static_categorical_features`" ) _UpperCAmelCase : List[str] = cardinality else: _UpperCAmelCase : Any = [0] if embedding_dimension is not None and num_static_categorical_features > 0: if len(_lowerCAmelCase ) != num_static_categorical_features: raise ValueError( "The embedding dimension should be a list of the same length as `num_static_categorical_features`" ) _UpperCAmelCase : List[str] = embedding_dimension else: _UpperCAmelCase : List[Any] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality] _UpperCAmelCase : int = num_parallel_samples # Transformer architecture configuration _UpperCAmelCase : int = input_size * len(self.lags_sequence ) + self._number_of_features _UpperCAmelCase : List[Any] = d_model _UpperCAmelCase : Tuple = encoder_attention_heads _UpperCAmelCase : List[Any] = decoder_attention_heads _UpperCAmelCase : Union[str, Any] = encoder_ffn_dim _UpperCAmelCase : int = decoder_ffn_dim _UpperCAmelCase : Tuple = encoder_layers _UpperCAmelCase : str = decoder_layers _UpperCAmelCase : Optional[Any] = dropout _UpperCAmelCase : List[str] = attention_dropout _UpperCAmelCase : Any = activation_dropout _UpperCAmelCase : Tuple = encoder_layerdrop _UpperCAmelCase : Optional[Any] = decoder_layerdrop _UpperCAmelCase : Any = activation_function _UpperCAmelCase : Optional[Any] = init_std _UpperCAmelCase : Optional[Any] = use_cache # Autoformer _UpperCAmelCase : Tuple = label_length _UpperCAmelCase : Dict = moving_average _UpperCAmelCase : Optional[Any] = autocorrelation_factor super().__init__(is_encoder_decoder=_lowerCAmelCase , **_lowerCAmelCase ) @property def _A ( self : Tuple ): return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + 
self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
31
'''simple docstring''' import argparse import shlex import runhouse as rh if __name__ == "__main__": # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access # setup instructions, if using on-demand hardware # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster # Throw an error if user passes both BYO and on-demand cluster args # Otherwise, use default values _SCREAMING_SNAKE_CASE = argparse.ArgumentParser() parser.add_argument("--user", type=str, default="ubuntu") parser.add_argument("--host", type=str, default="localhost") parser.add_argument("--key_path", type=str, default=None) parser.add_argument("--instance", type=str, default="V100:1") parser.add_argument("--provider", type=str, default="cheapest") parser.add_argument("--use_spot", type=bool, default=False) parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py") _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = parser.parse_known_args() if args.host != "localhost": if args.instance != "V100:1" or args.provider != "cheapest": raise ValueError("Cannot specify both BYO and on-demand cluster args") _SCREAMING_SNAKE_CASE = rh.cluster( name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path} ) else: _SCREAMING_SNAKE_CASE = rh.cluster( name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot ) _SCREAMING_SNAKE_CASE = args.example.rsplit("/", 1)[0] # Set up remote environment cluster.install_packages(["pip:./"]) # Installs transformers from local source # Note transformers is copied into the home directory on the remote machine, so we can install from there cluster.run([f'''pip install -r transformers/examples/{example_dir}/requirements.txt''']) cluster.run(["pip 
install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"]) # Run example. You can bypass the CLI wrapper and paste your own code here. cluster.run([f'''python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}''']) # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI): # from my_script... import train # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard'] # launch_train_gpu = rh.function(fn=train, # system=gpu, # reqs=reqs, # name='train_bert_glue') # # We can pass in arguments just like we would to a function: # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16 # stream_logs=True)
158
0
import argparse import math import traceback import dateutil.parser as date_parser import requests def __lowerCamelCase ( UpperCAmelCase_ : Any ): """simple docstring""" a :int = {} a :List[Any] = job['''started_at'''] a :List[Any] = job['''completed_at'''] a :List[str] = date_parser.parse(UpperCAmelCase_ ) a :Dict = date_parser.parse(UpperCAmelCase_ ) a :Optional[int] = round((end_datetime - start_datetime).total_seconds() / 60.0 ) a :List[str] = start a :Optional[Any] = end a :List[Any] = duration_in_min return job_info def __lowerCamelCase ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any]=None ): """simple docstring""" a :str = None if token is not None: a :Optional[Any] = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F'''Bearer {token}'''} a :Optional[int] = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100''' a :str = requests.get(UpperCAmelCase_ , headers=UpperCAmelCase_ ).json() a :Tuple = {} try: job_time.update({job['''name''']: extract_time_from_single_job(UpperCAmelCase_ ) for job in result['''jobs''']} ) a :Tuple = math.ceil((result['''total_count'''] - 100) / 100 ) for i in range(UpperCAmelCase_ ): a :Union[str, Any] = requests.get(url + F'''&page={i + 2}''' , headers=UpperCAmelCase_ ).json() job_time.update({job['''name''']: extract_time_from_single_job(UpperCAmelCase_ ) for job in result['''jobs''']} ) return job_time except Exception: print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' ) return {} if __name__ == "__main__": snake_case : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''') snake_case : Union[str, Any] = parser.parse_args() snake_case : Union[str, Any] = get_job_time(args.workflow_run_id) snake_case : Optional[Any] = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True)) for 
k, v in job_time.items(): print(F"""{k}: {v["duration"]}""")
281
from collections import OrderedDict from typing import List, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging snake_case : Union[str, Any] = logging.get_logger(__name__) snake_case : List[str] = { '''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''', } class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = 'efficientnet' def __init__( self , _lowerCamelCase = 3 , _lowerCamelCase = 600 , _lowerCamelCase = 2.0 , _lowerCamelCase = 3.1 , _lowerCamelCase = 8 , _lowerCamelCase = [3, 3, 5, 3, 5, 5, 3] , _lowerCamelCase = [32, 16, 24, 40, 80, 112, 192] , _lowerCamelCase = [16, 24, 40, 80, 112, 192, 320] , _lowerCamelCase = [] , _lowerCamelCase = [1, 2, 2, 2, 1, 2, 1] , _lowerCamelCase = [1, 2, 2, 3, 3, 4, 1] , _lowerCamelCase = [1, 6, 6, 6, 6, 6, 6] , _lowerCamelCase = 0.25 , _lowerCamelCase = "swish" , _lowerCamelCase = 2560 , _lowerCamelCase = "mean" , _lowerCamelCase = 0.02 , _lowerCamelCase = 0.001 , _lowerCamelCase = 0.99 , _lowerCamelCase = 0.5 , _lowerCamelCase = 0.2 , **_lowerCamelCase , ): super().__init__(**_lowerCamelCase ) a :Optional[int] = num_channels a :List[str] = image_size a :int = width_coefficient a :Optional[Any] = depth_coefficient a :Any = depth_divisor a :Any = kernel_sizes a :Tuple = in_channels a :Union[str, Any] = out_channels a :Any = depthwise_padding a :Any = strides a :Optional[Any] = num_block_repeats a :Tuple = expand_ratios a :Dict = squeeze_expansion_ratio a :int = hidden_act a :Dict = hidden_dim a :Tuple = pooling_type a :Any = initializer_range a :Tuple = batch_norm_eps a :Optional[int] = batch_norm_momentum a :List[Any] = dropout_rate a :Optional[int] = drop_connect_rate a :Tuple = sum(_lowerCamelCase ) * 4 class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = version.parse('1.11' ) @property def SCREAMING_SNAKE_CASE__ ( self ): return OrderedDict( [ ('''pixel_values''', 
{0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def SCREAMING_SNAKE_CASE__ ( self ): return 1e-5
281
1
import argparse import os import shutil from pathlib import Path import onnx import torch from packaging import version from torch.onnx import export from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline snake_case : Tuple = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11") def lowerCAmelCase_ ( _snake_case : Union[str, Any] , _snake_case : tuple , _snake_case : Path , _snake_case : Optional[Any] , _snake_case : Any , _snake_case : Dict , _snake_case : Optional[Any] , _snake_case : List[str]=False , ) -> Optional[Any]: '''simple docstring''' output_path.parent.mkdir(parents=_snake_case , exist_ok=_snake_case ) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( _snake_case , _snake_case , f=output_path.as_posix() , input_names=_snake_case , output_names=_snake_case , dynamic_axes=_snake_case , do_constant_folding=_snake_case , use_external_data_format=_snake_case , enable_onnx_checker=_snake_case , opset_version=_snake_case , ) else: export( _snake_case , _snake_case , f=output_path.as_posix() , input_names=_snake_case , output_names=_snake_case , dynamic_axes=_snake_case , do_constant_folding=_snake_case , opset_version=_snake_case , ) @torch.no_grad() def lowerCAmelCase_ ( _snake_case : str , _snake_case : str , _snake_case : int , _snake_case : bool = False ) -> str: '''simple docstring''' __magic_name__ : Tuple = torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): __magic_name__ : List[str] = "cuda" elif fpaa and not torch.cuda.is_available(): raise ValueError("`float16` model export is only supported on GPUs with CUDA" ) else: __magic_name__ : Tuple = "cpu" __magic_name__ : Optional[Any] = StableDiffusionPipeline.from_pretrained(_snake_case , torch_dtype=_snake_case ).to(_snake_case ) __magic_name__ : List[str] = 
Path(_snake_case ) # TEXT ENCODER __magic_name__ : Tuple = pipeline.text_encoder.config.max_position_embeddings __magic_name__ : List[str] = pipeline.text_encoder.config.hidden_size __magic_name__ : List[Any] = pipeline.tokenizer( "A sample prompt" , padding="max_length" , max_length=pipeline.tokenizer.model_max_length , truncation=_snake_case , return_tensors="pt" , ) onnx_export( pipeline.text_encoder , model_args=(text_input.input_ids.to(device=_snake_case , dtype=torch.intaa )) , output_path=output_path / "text_encoder" / "model.onnx" , ordered_input_names=["input_ids"] , output_names=["last_hidden_state", "pooler_output"] , dynamic_axes={ "input_ids": {0: "batch", 1: "sequence"}, } , opset=_snake_case , ) del pipeline.text_encoder # UNET __magic_name__ : Any = pipeline.unet.config.in_channels __magic_name__ : Tuple = pipeline.unet.config.sample_size __magic_name__ : int = output_path / "unet" / "model.onnx" onnx_export( pipeline.unet , model_args=( torch.randn(2 , _snake_case , _snake_case , _snake_case ).to(device=_snake_case , dtype=_snake_case ), torch.randn(2 ).to(device=_snake_case , dtype=_snake_case ), torch.randn(2 , _snake_case , _snake_case ).to(device=_snake_case , dtype=_snake_case ), False, ) , output_path=_snake_case , ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"] , output_names=["out_sample"] , dynamic_axes={ "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"}, "timestep": {0: "batch"}, "encoder_hidden_states": {0: "batch", 1: "sequence"}, } , opset=_snake_case , use_external_data_format=_snake_case , ) __magic_name__ : Any = str(unet_path.absolute().as_posix() ) __magic_name__ : int = os.path.dirname(_snake_case ) __magic_name__ : Tuple = onnx.load(_snake_case ) # clean up existing tensor files shutil.rmtree(_snake_case ) os.mkdir(_snake_case ) # collate external tensor files into one onnx.save_model( _snake_case , _snake_case , save_as_external_data=_snake_case , 
all_tensors_to_one_file=_snake_case , location="weights.pb" , convert_attribute=_snake_case , ) del pipeline.unet # VAE ENCODER __magic_name__ : int = pipeline.vae __magic_name__ : Tuple = vae_encoder.config.in_channels __magic_name__ : List[Any] = vae_encoder.config.sample_size # need to get the raw tensor output (sample) from the encoder __magic_name__ : Union[str, Any] = lambda _snake_case , _snake_case : vae_encoder.encode(_snake_case , _snake_case )[0].sample() onnx_export( _snake_case , model_args=( torch.randn(1 , _snake_case , _snake_case , _snake_case ).to(device=_snake_case , dtype=_snake_case ), False, ) , output_path=output_path / "vae_encoder" / "model.onnx" , ordered_input_names=["sample", "return_dict"] , output_names=["latent_sample"] , dynamic_axes={ "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"}, } , opset=_snake_case , ) # VAE DECODER __magic_name__ : Dict = pipeline.vae __magic_name__ : Tuple = vae_decoder.config.latent_channels __magic_name__ : Optional[Any] = vae_decoder.config.out_channels # forward only through the decoder part __magic_name__ : str = vae_encoder.decode onnx_export( _snake_case , model_args=( torch.randn(1 , _snake_case , _snake_case , _snake_case ).to(device=_snake_case , dtype=_snake_case ), False, ) , output_path=output_path / "vae_decoder" / "model.onnx" , ordered_input_names=["latent_sample", "return_dict"] , output_names=["sample"] , dynamic_axes={ "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"}, } , opset=_snake_case , ) del pipeline.vae # SAFETY CHECKER if pipeline.safety_checker is not None: __magic_name__ : List[Any] = pipeline.safety_checker __magic_name__ : Optional[int] = safety_checker.config.vision_config.num_channels __magic_name__ : str = safety_checker.config.vision_config.image_size __magic_name__ : List[Any] = safety_checker.forward_onnx onnx_export( pipeline.safety_checker , model_args=( torch.randn( 1 , _snake_case , _snake_case , _snake_case , 
).to(device=_snake_case , dtype=_snake_case ), torch.randn(1 , _snake_case , _snake_case , _snake_case ).to(device=_snake_case , dtype=_snake_case ), ) , output_path=output_path / "safety_checker" / "model.onnx" , ordered_input_names=["clip_input", "images"] , output_names=["out_images", "has_nsfw_concepts"] , dynamic_axes={ "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"}, "images": {0: "batch", 1: "height", 2: "width", 3: "channels"}, } , opset=_snake_case , ) del pipeline.safety_checker __magic_name__ : Optional[int] = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker" ) __magic_name__ : Tuple = pipeline.feature_extractor else: __magic_name__ : Optional[Any] = None __magic_name__ : Dict = None __magic_name__ : Optional[int] = OnnxStableDiffusionPipeline( vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder" ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder" ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder" ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / "unet" ) , scheduler=pipeline.scheduler , safety_checker=_snake_case , feature_extractor=_snake_case , requires_safety_checker=safety_checker is not None , ) onnx_pipeline.save_pretrained(_snake_case ) print("ONNX pipeline saved to" , _snake_case ) del pipeline del onnx_pipeline __magic_name__ : int = OnnxStableDiffusionPipeline.from_pretrained(_snake_case , provider="CPUExecutionProvider" ) print("ONNX pipeline is loadable" ) if __name__ == "__main__": snake_case : List[str] = argparse.ArgumentParser() parser.add_argument( "--model_path", type=str, required=True, help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).", ) parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.") parser.add_argument( "--opset", default=14, type=int, help="The version of the ONNX operator set to use.", ) 
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode") snake_case : List[Any] = parser.parse_args() convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
281
import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform

from transformers import (
    BitConfig,
    ViTHybridConfig,
    ViTHybridForImageClassification,
    ViTHybridImageProcessor,
    ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    """Return (timm_key, hf_key) pairs mapping the timm checkpoint onto the HF ViT-hybrid layout."""
    rename_keys = []

    # stem
    rename_keys.append(("cls_token", "vit.embeddings.cls_token"))
    rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings"))
    rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"))

    # backbone stem
    rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight"))
    rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight"))
    rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias"))

    # backbone stages: each bottleneck block carries conv1/2/3 + norm1/2/3;
    # block 0 of every stage additionally has a downsample conv + norm.
    for stage_idx in range(len(config.backbone_config.depths)):
        timm_stage = f"patch_embed.backbone.stages.{stage_idx}"
        hf_stage = f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}"
        for layer_idx in range(config.backbone_config.depths[stage_idx]):
            for suffix in (
                "conv1.weight",
                "norm1.weight",
                "norm1.bias",
                "conv2.weight",
                "norm2.weight",
                "norm2.bias",
                "conv3.weight",
                "norm3.weight",
                "norm3.bias",
            ):
                rename_keys.append((f"{timm_stage}.blocks.{layer_idx}.{suffix}", f"{hf_stage}.layers.{layer_idx}.{suffix}"))
        for suffix in ("downsample.conv.weight", "downsample.norm.weight", "downsample.norm.bias"):
            rename_keys.append((f"{timm_stage}.blocks.0.{suffix}", f"{hf_stage}.layers.0.{suffix}"))

    # transformer encoder: output projection, 2 feedforward layers and 2 layernorms per block
    for i in range(config.num_hidden_layers):
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, strip the "vit." prefix from every key that carries it
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    """Split timm's fused qkv projection into separate query/key/value weights in-place."""
    for i in range(config.num_hidden_layers):
        prefix = "" if base_model else "vit."
        # timm stores the input projection as a single fused matrix + bias
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[config.hidden_size : config.hidden_size * 2]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    """Drop the timm classification head weights (not present in the base HF model)."""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    """Move the entry at key `old` to key `new` in-place."""
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """Fetch the standard COCO cats image used to sanity-check vision conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    """Convert a timm ViT-hybrid checkpoint to the HF format, verify it, and optionally save/push it."""
    # define default ViT hybrid configuration (BiT backbone feeding the ViT encoder)
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # attach ImageNet-1k label mapping
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model ("-in21k" checkpoints have no classification head)
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor mirroring timm's eval transform
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify that both preprocessing pipelines agree
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_r50_s16_384",
        type=str,
        help="Name of the hybrid ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )

    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
281
1
"""EfficientFormer model configuration."""

from typing import List

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}


class __A(PretrainedConfig):
    """Configuration class storing the hyper-parameters of an EfficientFormer model.

    Instantiating it with the defaults yields a configuration similar to
    snap-research/efficientformer-l1-300.
    """

    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        """Store all hyper-parameters on the instance and forward extra kwargs to PretrainedConfig.

        NOTE(review): mutable list defaults mirror the upstream transformers config; they are
        never mutated here, only stored.
        """
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
365
"""Tests for the CycleDiffusion pipeline (fast unit tests + slow integration tests)."""

import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Fast CPU-friendly tests using tiny randomly-initialised components."""

    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build a tiny, deterministic set of pipeline components."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            num_train_timesteps=1_000,
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Build a deterministic call-kwargs dict for the pipeline."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        # MPS has no per-device Generator; fall back to the global seed there.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()


@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    """Slow end-to-end tests against the real CompVis/stable-diffusion-v1-4 weights."""

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1

    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
0
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Map of submodule -> public names, consumed by _LazyModule below.
_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Only expose the torch modeling classes when torch is installed;
    # add them under their own key instead of clobbering the config entry.
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )
else:
    import sys

    # Install the lazy module in place of this one so attribute access
    # triggers on-demand submodule imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
178
import warnings

from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor


lowercase = logging.get_logger(__name__)


class UpperCamelCase_(PerceiverImageProcessor):
    """Deprecated feature-extractor alias that forwards everything to PerceiverImageProcessor."""

    def __init__(self, *args, **kwargs) -> None:
        # Emit the standard transformers deprecation warning, then defer
        # entirely to the image-processor implementation.
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
178
1
"""Tests for the MBart-50 tokenizer (slow SentencePiece + fast Rust variants).

NOTE(review): this file has been mechanically renamed and several bindings are
broken as a result — values are assigned to the throwaway name
``__UpperCamelCase`` but read back under their original names (``tokenizer``,
``vocab_keys``, ``ids``, ``batch``, ``targets``, ...), every test method is
named ``_a`` (later defs clobber earlier ones in the class namespace),
``A_`` / ``EN_CODE`` / ``RO_CODE`` / ``Optional`` / ``Tuple`` etc. are
referenced but never defined (the three ``_A = ...`` assignments below
overwrite one another), and the arguments of most assertions have been
collapsed to ``A_``. The code is reproduced unchanged; only comments are added.
"""
import shutil
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    slow,
)

from ...test_tokenization_common import TokenizerTesterMixin


# NOTE(review): three successive assignments to the same name — presumably
# these were a fixture path plus the EN/RO language-code ids; only the last
# value (25_0020) survives, leaving EN_CODE/RO_CODE below undefined. Confirm
# against the upstream test file.
_A = get_tests_dir('fixtures/test_sentencepiece.model')

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

_A = 25_0004
_A = 25_0020


@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__(A_, unittest.TestCase):
    """Common tokenizer-mixin test suite instantiated for MBart-50."""

    # Tokenizer classes under test and mixin configuration flags.
    UpperCAmelCase__ : Optional[int] = MBartaaTokenizer
    UpperCAmelCase__ : Optional[int] = MBartaaTokenizerFast
    UpperCAmelCase__ : Tuple = True
    UpperCAmelCase__ : List[Any] = True

    def _a(self) -> List[Any]:
        # Build a tokenizer from the SentencePiece fixture and persist it so
        # the mixin can reload it per-test.
        super().setUp()
        # We have a SentencePiece fixture for testing
        __UpperCamelCase = MBartaaTokenizer(A_, src_lang='en_XX', tgt_lang='ro_RO', keep_accents=A_)
        tokenizer.save_pretrained(self.tmpdirname)

    def _a(self) -> List[Any]:
        # Round-trip a single token <-> id conversion.
        __UpperCamelCase = '<s>'
        __UpperCamelCase = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_), A_)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_), A_)

    def _a(self) -> str:
        # Vocabulary ordering: specials first, <mask> last, fixed total size.
        __UpperCamelCase = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<s>')
        self.assertEqual(vocab_keys[1], '<pad>')
        self.assertEqual(vocab_keys[-1], '<mask>')
        self.assertEqual(len(A_), 1054)

    def _a(self) -> Any:
        # vocab_size must agree with the fixture model.
        self.assertEqual(self.get_tokenizer().vocab_size, 1054)

    def _a(self) -> Optional[int]:
        # Full tokenize / ids / detokenize round trip, including the
        # fairseq id offset and <unk> substitution for unseen characters.
        __UpperCamelCase = MBartaaTokenizer(A_, src_lang='en_XX', tgt_lang='ro_RO', keep_accents=A_)
        __UpperCamelCase = tokenizer.tokenize('This is a test')
        self.assertListEqual(A_, ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(A_),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        __UpperCamelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            A_,
            [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'],
        )
        __UpperCamelCase = tokenizer.convert_tokens_to_ids(A_)
        self.assertListEqual(
            A_,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )
        __UpperCamelCase = tokenizer.convert_ids_to_tokens(A_)
        self.assertListEqual(
            A_,
            [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'],
        )

    @slow
    def _a(self) -> int:
        # Golden-output integration test against the hosted checkpoint,
        # pinned to a specific revision so the expectation is stable.
        # fmt: off
        __UpperCamelCase = {'input_ids': [[250004, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [250004, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250004, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=A_,
            model_name='facebook/mbart-large-50',
            revision='d3913889c59cd5c9e456b269c376325eabad57e2',
        )

    def _a(self) -> Union[str, Any]:
        # Slow-vs-fast save/load equivalence in all three save formats.
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        __UpperCamelCase = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart50', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                __UpperCamelCase = self.rust_tokenizer_class.from_pretrained(A_, **A_)
                __UpperCamelCase = self.tokenizer_class.from_pretrained(A_, **A_)
                __UpperCamelCase = tempfile.mkdtemp()
                __UpperCamelCase = tokenizer_r.save_pretrained(A_)
                __UpperCamelCase = tokenizer_p.save_pretrained(A_)
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files))
                __UpperCamelCase = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f)
                self.assertSequenceEqual(A_, A_)
                # Checks everything loads correctly in the same way
                __UpperCamelCase = tokenizer_r.from_pretrained(A_)
                __UpperCamelCase = tokenizer_p.from_pretrained(A_)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(A_, A_))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(A_)
                # Save tokenizer rust, legacy_format=True
                __UpperCamelCase = tempfile.mkdtemp()
                __UpperCamelCase = tokenizer_r.save_pretrained(A_, legacy_format=A_)
                __UpperCamelCase = tokenizer_p.save_pretrained(A_)
                # Checks it save with the same files
                self.assertSequenceEqual(A_, A_)
                # Checks everything loads correctly in the same way
                __UpperCamelCase = tokenizer_r.from_pretrained(A_)
                __UpperCamelCase = tokenizer_p.from_pretrained(A_)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(A_, A_))
                shutil.rmtree(A_)
                # Save tokenizer rust, legacy_format=False
                __UpperCamelCase = tempfile.mkdtemp()
                __UpperCamelCase = tokenizer_r.save_pretrained(A_, legacy_format=A_)
                __UpperCamelCase = tokenizer_p.save_pretrained(A_)
                # Checks it saved the tokenizer.json file
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files))
                # Checks everything loads correctly in the same way
                __UpperCamelCase = tokenizer_r.from_pretrained(A_)
                __UpperCamelCase = tokenizer_p.from_pretrained(A_)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(A_, A_))
                shutil.rmtree(A_)


@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__(unittest.TestCase):
    """Translation-oriented integration tests against the hosted
    facebook/mbart-large-50-one-to-many-mmt checkpoint (network + torch)."""

    # NOTE(review): duplicate class name with the suite above — the second
    # definition shadows the first at module scope. Attribute names below are
    # also mangled (checkpoint_name / src_text / tgt_text / expected_src_tokens
    # are read later but never defined under those names).
    UpperCAmelCase__ : Any = "facebook/mbart-large-50-one-to-many-mmt"
    UpperCAmelCase__ : Tuple = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    UpperCAmelCase__ : Tuple = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    UpperCAmelCase__ : List[str] = [EN_CODE, 8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2]

    @classmethod
    def _a(cls) -> Optional[Any]:
        # One shared tokenizer for the whole class (network download).
        __UpperCamelCase = MBartaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang='en_XX', tgt_lang='ro_RO')
        __UpperCamelCase = 1
        return cls

    def _a(self) -> Optional[int]:
        # Language-code token ids are fixed by the checkpoint.
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'], 250020)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['mr_IN'], 250038)

    def _a(self) -> Tuple:
        # Source sentence encodes to the expected id sequence.
        __UpperCamelCase = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, A_)

    def _a(self) -> List[str]:
        # Decoding with skip_special_tokens drops the language code and EOS.
        self.assertIn(A_, self.tokenizer.all_special_ids)
        __UpperCamelCase = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        __UpperCamelCase = self.tokenizer.decode(A_, skip_special_tokens=A_)
        __UpperCamelCase = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=A_)
        self.assertEqual(A_, A_)
        self.assertNotIn(self.tokenizer.eos_token, A_)

    def _a(self) -> Any:
        # Truncation keeps the language code first and EOS (id 2) last.
        __UpperCamelCase = ['this is gunna be a long sentence ' * 20]
        assert isinstance(src_text[0], A_)
        __UpperCamelCase = 10
        __UpperCamelCase = self.tokenizer(A_, max_length=A_, truncation=A_).input_ids[0]
        self.assertEqual(ids[0], A_)
        self.assertEqual(ids[-1], 2)
        self.assertEqual(len(A_), A_)

    def _a(self) -> int:
        # <mask> and ar_AR sit just past the base vocabulary.
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR']), [250053, 250001])

    def _a(self) -> Optional[Any]:
        # save_pretrained / from_pretrained preserves the fairseq id table.
        __UpperCamelCase = tempfile.mkdtemp()
        __UpperCamelCase = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(A_)
        __UpperCamelCase = MBartaaTokenizer.from_pretrained(A_)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, A_)

    @require_torch
    def _a(self) -> Tuple:
        # Batched src/tgt encoding matches fairseq layout: lang code first,
        # EOS last, decoder inputs right-shifted.
        __UpperCamelCase = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=A_, return_tensors='pt')
        __UpperCamelCase = shift_tokens_right(batch['labels'], self.tokenizer.pad_token_id)
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]

    @require_torch
    def _a(self) -> List[Any]:
        # Shapes and prefix/suffix bookkeeping after seq2seq encoding.
        __UpperCamelCase = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=A_, truncation=A_, max_length=len(self.expected_src_tokens), return_tensors='pt',
        )
        __UpperCamelCase = shift_tokens_right(batch['labels'], self.tokenizer.pad_token_id)
        self.assertIsInstance(A_, A_)
        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        __UpperCamelCase = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, A_)
        self.assertEqual(2, batch.decoder_input_ids[0, 0])  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    def _a(self) -> Optional[int]:
        # Source and target sides can truncate to different max lengths.
        __UpperCamelCase = self.tokenizer(self.src_text, padding=A_, truncation=A_, max_length=3, return_tensors='pt')
        __UpperCamelCase = self.tokenizer(
            text_target=self.tgt_text, padding=A_, truncation=A_, max_length=10, return_tensors='pt')
        __UpperCamelCase = targets['input_ids']
        __UpperCamelCase = shift_tokens_right(A_, self.tokenizer.pad_token_id)
        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def _a(self) -> Any:
        # Translation helper: src lang code prepended, forced BOS is the
        # target-language code.
        __UpperCamelCase = self.tokenizer._build_translation_inputs(
            'A test', return_tensors='pt', src_lang='en_XX', tgt_lang='ar_AR')
        self.assertEqual(
            nested_simplify(A_),
            {
                # en_XX, A, test, EOS
                'input_ids': [[250004, 62, 3034, 2]],
                'attention_mask': [[1, 1, 1, 1]],
                # ar_AR
                'forced_bos_token_id': 250001,
            },
        )
360
"""UnCLIP DDPM-style scheduler.

NOTE(review): mechanical renaming has broken many bindings in this file —
values are assigned to the throwaway name ``__UpperCamelCase`` but read back
under their original names (``betas``, ``variance``, ``alpha_prod_t``, ...),
several signatures declare the same parameter name more than once
(``SCREAMING_SNAKE_CASE__`` / ``A_`` — a SyntaxError), the two classes share
one name, and the base names ``A_`` are undefined. Code is reproduced
unchanged; only comments/docstrings are added.
"""
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UpperCAmelCase__(A_):
    """Output of a scheduler step: the previous (less noisy) sample and,
    optionally, the denoised x_0 prediction."""

    UpperCAmelCase__ : torch.FloatTensor
    UpperCAmelCase__ : Optional[torch.FloatTensor] = None


def _UpperCAmelCase(SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : int = 0.999, SCREAMING_SNAKE_CASE__ : str = "cosine"):
    """Build a beta schedule from a cumulative-alpha ("alpha bar") function.

    Each beta_i = min(1 - abar(t_{i+1}) / abar(t_i), max_beta); "cosine" uses
    the squared-cosine abar from the improved-DDPM paper, "exp" an exponential
    decay.
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(SCREAMING_SNAKE_CASE__ : Any):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(SCREAMING_SNAKE_CASE__ : Optional[Any]):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f'Unsupported alpha_tranform_type: {alpha_transform_type}')
    __UpperCamelCase = []
    for i in range(SCREAMING_SNAKE_CASE__):
        __UpperCamelCase = i / num_diffusion_timesteps
        __UpperCamelCase = (i + 1) / num_diffusion_timesteps
        # Clamp each beta at max_beta to keep the chain numerically stable.
        betas.append(min(1 - alpha_bar_fn(SCREAMING_SNAKE_CASE__) / alpha_bar_fn(SCREAMING_SNAKE_CASE__), SCREAMING_SNAKE_CASE__))
    return torch.tensor(SCREAMING_SNAKE_CASE__, dtype=torch.floataa)


class UpperCAmelCase__(A_, A_):
    """UnCLIP scheduler: DDPM ancestral sampling restricted to the
    squaredcos_cap_v2 beta schedule, with fixed_small_log or learned_range
    variance handling."""

    @register_to_config
    def __init__(self, A_ = 1000, A_ = "fixed_small_log", A_ = True, A_ = 1.0, A_ = "epsilon", A_ = "squaredcos_cap_v2") -> Tuple:
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError('UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'')
        # Precompute betas, alphas and their cumulative product once.
        __UpperCamelCase = betas_for_alpha_bar(A_)
        __UpperCamelCase = 1.0 - self.betas
        __UpperCamelCase = torch.cumprod(self.alphas, dim=0)
        # Sentinel cumulative alpha for "before step 0".
        __UpperCamelCase = torch.tensor(1.0)
        # standard deviation of the initial noise distribution
        __UpperCamelCase = 1.0
        # setable values
        __UpperCamelCase = None
        # Default timestep grid: all training steps, descending.
        __UpperCamelCase = torch.from_numpy(np.arange(0, A_)[::-1].copy())
        __UpperCamelCase = variance_type

    def _a(self, A_, A_ = None) -> torch.FloatTensor:
        # UnCLIP applies no input scaling; identity pass-through.
        return sample

    def _a(self, A_, A_ = None) -> Tuple:
        # Spread num_inference_steps timesteps evenly (inclusive of the last
        # training step), descending, and move them to the target device.
        __UpperCamelCase = num_inference_steps
        __UpperCamelCase = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        __UpperCamelCase = (np.arange(0, A_) * step_ratio).round()[::-1].copy().astype(np.intaa)
        __UpperCamelCase = torch.from_numpy(A_).to(A_)

    def _a(self, A_, A_=None, A_=None, A_=None) -> List[Any]:
        # Posterior variance q(x_{t-1} | x_t, x_0) for the (possibly
        # non-adjacent) step t -> prev_timestep.
        if prev_timestep is None:
            __UpperCamelCase = t - 1
        __UpperCamelCase = self.alphas_cumprod[t]
        __UpperCamelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        __UpperCamelCase = 1 - alpha_prod_t
        __UpperCamelCase = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            __UpperCamelCase = self.betas[t]
        else:
            # Effective beta when steps are skipped.
            __UpperCamelCase = 1 - alpha_prod_t / alpha_prod_t_prev
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        __UpperCamelCase = beta_prod_t_prev / beta_prod_t * beta
        if variance_type is None:
            __UpperCamelCase = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            # Log-clamp avoids log(0), then return a standard deviation.
            __UpperCamelCase = torch.log(torch.clamp(A_, min=1E-20))
            __UpperCamelCase = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            # Interpolate in log space between the fixed bounds using the
            # model-predicted fraction in [-1, 1].
            __UpperCamelCase = variance.log()
            __UpperCamelCase = beta.log()
            __UpperCamelCase = (predicted_variance + 1) / 2
            __UpperCamelCase = frac * max_log + (1 - frac) * min_log
        return variance

    def _a(self, A_, A_, A_, A_ = None, A_=None, A_ = True) -> Union[UnCLIPSchedulerOutput, Tuple]:
        # One ancestral sampling step: predict x_0 from the model output,
        # form the DDPM posterior mean, and add the matching noise.
        __UpperCamelCase = timestep
        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            # Model predicts noise and variance stacked along channels.
            __UpperCamelCase, __UpperCamelCase = torch.split(A_, sample.shape[1], dim=1)
        else:
            __UpperCamelCase = None
        # 1. compute alphas, betas
        if prev_timestep is None:
            __UpperCamelCase = t - 1
        __UpperCamelCase = self.alphas_cumprod[t]
        __UpperCamelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        __UpperCamelCase = 1 - alpha_prod_t
        __UpperCamelCase = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            __UpperCamelCase = self.betas[t]
            __UpperCamelCase = self.alphas[t]
        else:
            __UpperCamelCase = 1 - alpha_prod_t / alpha_prod_t_prev
            __UpperCamelCase = 1 - beta
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            __UpperCamelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            __UpperCamelCase = model_output
        else:
            raise ValueError(
                f'prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`'
                ' for the UnCLIPScheduler.'
            )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            __UpperCamelCase = torch.clamp(
                A_, -self.config.clip_sample_range, self.config.clip_sample_range)
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        __UpperCamelCase = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        __UpperCamelCase = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        __UpperCamelCase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        __UpperCamelCase = 0
        if t > 0:
            # Noise only for t > 0; the final step is deterministic.
            __UpperCamelCase = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=A_, device=model_output.device)
            __UpperCamelCase = self._get_variance(
                A_, predicted_variance=A_, prev_timestep=A_,
            )
            if self.variance_type == "fixed_small_log":
                __UpperCamelCase = variance
            elif self.variance_type == "learned_range":
                __UpperCamelCase = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f'variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`'
                    ' for the UnCLIPScheduler.'
                )
            __UpperCamelCase = variance * variance_noise
        __UpperCamelCase = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample,)
        return UnCLIPSchedulerOutput(prev_sample=A_, pred_original_sample=A_)

    def _a(self, A_, A_, A_) -> torch.FloatTensor:
        # Forward-diffuse clean samples to the given timesteps:
        # x_t = sqrt(abar_t) * x_0 + sqrt(1 - abar_t) * noise.
        # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
        __UpperCamelCase = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        __UpperCamelCase = timesteps.to(original_samples.device)
        __UpperCamelCase = alphas_cumprod[timesteps] ** 0.5
        __UpperCamelCase = sqrt_alpha_prod.flatten()
        # Unsqueeze until the per-timestep scalars broadcast over the sample.
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            __UpperCamelCase = sqrt_alpha_prod.unsqueeze(-1)
        __UpperCamelCase = (1 - alphas_cumprod[timesteps]) ** 0.5
        __UpperCamelCase = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            __UpperCamelCase = sqrt_one_minus_alpha_prod.unsqueeze(-1)
        __UpperCamelCase = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
117
0
"""Tiny financial helper: gross price from net price and tax rate."""


def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return *price* increased by *tax_rate* (e.g. 0.25 for 25% tax).

    Bugs fixed: the original declared the parameter name ``UpperCAmelCase_``
    twice (a SyntaxError), read the unbound names ``price``/``tax_rate``, and
    the ``__main__`` block called ``price_plus_tax`` which was never defined.

    >>> price_plus_tax(100, 0.25)
    125.0
    """
    return price * (1 + tax_rate)


if __name__ == "__main__":
    # `=` inside the f-string prints the expression together with its value.
    print(f'''{price_plus_tax(100, 0.2_5) = }''')
    print(f'''{price_plus_tax(1_2_5.5_0, 0.0_5) = }''')
172
"""Flax (NHWC) building blocks: 2x nearest-neighbour upsample, strided
downsample, and a time-conditioned ResNet block.

NOTE(review): mechanical renaming has broken this file — all three classes
share the name ``UpperCamelCase`` (later definitions shadow earlier ones at
module scope), values are assigned to ``__snake_case`` but read back under
their original names (``hidden_states``, ``batch``, ``residual``, ...), the
annotated tuple-unpack below is a SyntaxError, and attribute names such as
``self.conv`` / ``self.norma`` / ``self.conva`` are read but never set under
those names in ``setup``. Code is reproduced unchanged; only comments are
added.
"""
import flax.linen as nn
import jax
import jax.numpy as jnp


class UpperCamelCase(nn.Module):
    # Output channel count and parameter dtype (flax module fields).
    UpperCAmelCase : int
    UpperCAmelCase : jnp.dtype = jnp.floataa

    def _lowercase(self: Any) -> Optional[int]:
        # 3x3 same-padding conv applied after the spatial resize.
        __snake_case: str = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self: Any, _A: Any) -> str:
        # Nearest-neighbour 2x upsample in H and W, then conv.
        # Input is NHWC — the resize target doubles the middle two axes.
        __snake_case, __snake_case, __snake_case, __snake_case: Union[str, Any] = hidden_states.shape
        __snake_case: Union[str, Any] = jax.image.resize(
            _A,
            shape=(batch, height * 2, width * 2, channels),
            method='nearest',
        )
        __snake_case: List[Any] = self.conv(_A)
        return hidden_states


class UpperCamelCase(nn.Module):
    UpperCAmelCase : int
    UpperCAmelCase : jnp.dtype = jnp.floataa

    def _lowercase(self: Optional[Any]) -> List[Any]:
        # Stride-2 conv halves the spatial resolution.
        __snake_case: Dict = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self: int, _A: str) -> Any:
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        # NOTE(review): asymmetric padding is commented out here — confirm
        # against the reference implementation whether it should be applied.
        __snake_case: Union[str, Any] = self.conv(_A)
        return hidden_states


class UpperCamelCase(nn.Module):
    # in/out channels, dropout rate, optional 1x1 shortcut flag, dtype.
    UpperCAmelCase : int
    UpperCAmelCase : int = None
    UpperCAmelCase : float = 0.0
    UpperCAmelCase : bool = None
    UpperCAmelCase : jnp.dtype = jnp.floataa

    def _lowercase(self: List[str]) -> Dict:
        # Pre-activation ResNet block: GN -> swish -> conv, time embedding
        # injection, GN -> swish -> dropout -> conv, optional 1x1 shortcut.
        __snake_case: str = self.in_channels if self.out_channels is None else self.out_channels
        __snake_case: Optional[int] = nn.GroupNorm(num_groups=32, epsilon=1E-5)
        __snake_case: str = nn.Conv(
            _A,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )
        # Projects the time embedding onto the channel dimension.
        __snake_case: Optional[int] = nn.Dense(_A, dtype=self.dtype)
        __snake_case: int = nn.GroupNorm(num_groups=32, epsilon=1E-5)
        __snake_case: str = nn.Dropout(self.dropout_prob)
        __snake_case: Dict = nn.Conv(
            _A,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )
        # 1x1 shortcut is needed whenever the channel count changes.
        __snake_case: List[str] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
        __snake_case: Optional[Any] = None
        if use_nin_shortcut:
            __snake_case: List[str] = nn.Conv(
                _A,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding='VALID',
                dtype=self.dtype,
            )

    def __call__(self: List[Any], _A: Union[str, Any], _A: str, _A: int = True) -> Any:
        __snake_case: List[Any] = hidden_states
        __snake_case: Optional[Any] = self.norma(_A)
        __snake_case: int = nn.swish(_A)
        __snake_case: Optional[int] = self.conva(_A)
        # Broadcast the projected time embedding over H and W before adding.
        __snake_case: Dict = self.time_emb_proj(nn.swish(_A))
        __snake_case: List[str] = jnp.expand_dims(jnp.expand_dims(_A, 1), 1)
        __snake_case: Any = hidden_states + temb
        __snake_case: Tuple = self.norma(_A)
        __snake_case: Dict = nn.swish(_A)
        __snake_case: Union[str, Any] = self.dropout(_A, _A)
        __snake_case: Union[str, Any] = self.conva(_A)
        if self.conv_shortcut is not None:
            # Match channel counts on the residual branch.
            __snake_case: List[Any] = self.conv_shortcut(_A)
        return hidden_states + residual
172
1
from __future__ import annotations

import unittest

from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFLEDForConditionalGeneration, TFLEDModel


# NOTE(review): this test module is machine-obfuscated. Every parameter named
# "__SCREAMING_SNAKE_CASE" below is a duplicate argument name (a SyntaxError),
# every local is bound as "lowercase_" while later reads use the original
# variable names, and sibling class attributes share one name so only the last
# binding survives. Code is preserved byte-for-byte; comments only annotate.


@require_tf
class lowerCAmelCase__:
    """Builds a small LED configuration plus dummy encoder/decoder inputs for
    the common model tests (presumably TFLEDModelTester — TODO confirm)."""

    # NOTE(review): three attributes all named "lowerCAmelCase_" — presumably
    # config_cls / config_updates / hidden_act; only '''gelu''' survives.
    lowerCAmelCase_ = LEDConfig
    lowerCAmelCase_ = {}
    lowerCAmelCase_ = '''gelu'''

    def __init__(
        self,
        __SCREAMING_SNAKE_CASE,
        __SCREAMING_SNAKE_CASE=13,
        __SCREAMING_SNAKE_CASE=7,
        __SCREAMING_SNAKE_CASE=True,
        __SCREAMING_SNAKE_CASE=False,
        __SCREAMING_SNAKE_CASE=99,
        __SCREAMING_SNAKE_CASE=32,
        __SCREAMING_SNAKE_CASE=2,
        __SCREAMING_SNAKE_CASE=4,
        __SCREAMING_SNAKE_CASE=37,
        __SCREAMING_SNAKE_CASE=0.1,
        __SCREAMING_SNAKE_CASE=0.1,
        __SCREAMING_SNAKE_CASE=20,
        __SCREAMING_SNAKE_CASE=2,
        __SCREAMING_SNAKE_CASE=1,
        __SCREAMING_SNAKE_CASE=0,
        __SCREAMING_SNAKE_CASE=4,
    ):
        """Record the hyper-parameters used to build the test config.

        NOTE(review): the assignments bind the local "lowercase_" instead of
        `self.<attr>`, and read names (parent, batch_size, …) that the
        obfuscator removed from the parameter list.
        """
        lowercase_: Optional[Any] = parent
        lowercase_: Dict = batch_size
        lowercase_: List[Any] = seq_length
        lowercase_: Union[str, Any] = is_training
        lowercase_: Any = use_labels
        lowercase_: Union[str, Any] = vocab_size
        lowercase_: Union[str, Any] = hidden_size
        lowercase_: int = num_hidden_layers
        lowercase_: Any = num_attention_heads
        lowercase_: Union[str, Any] = intermediate_size
        lowercase_: List[str] = hidden_dropout_prob
        lowercase_: int = attention_probs_dropout_prob
        lowercase_: Optional[Any] = max_position_embeddings
        lowercase_: Union[str, Any] = eos_token_id
        lowercase_: Optional[Any] = pad_token_id
        lowercase_: Optional[Any] = bos_token_id
        lowercase_: Optional[int] = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        lowercase_: Tuple = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        lowercase_: str = (
            self.seq_length
            + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )

    def _snake_case(self):
        """Build a config and matching input dict, forcing global attention on
        the last token of every sequence."""
        lowercase_: Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        # Append an explicit EOS token to every sequence.
        lowercase_: str = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        lowercase_: List[Any] = tf.concat([input_ids, eos_tensor], axis=1)
        lowercase_: Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        # NOTE(review): `eos_token_ids=[2]` is not a standard LEDConfig kwarg
        # (singular `eos_token_id` is) — presumably absorbed by **kwargs; verify.
        lowercase_: Union[str, Any] = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            attention_window=self.attention_window,
            **self.config_updates,
        )
        lowercase_: Optional[Any] = prepare_led_inputs_dict(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE)
        # Global attention: zeros everywhere except a one on the final position.
        lowercase_: Tuple = tf.concat(
            [tf.zeros_like(__SCREAMING_SNAKE_CASE)[:, :-1], tf.ones_like(__SCREAMING_SNAKE_CASE)[:, -1:]],
            axis=-1,
        )
        lowercase_: Optional[Any] = global_attention_mask
        return config, inputs_dict

    def _snake_case(self, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE):
        """Check that decoding with cached past_key_values matches a full
        forward pass on the concatenated sequence."""
        lowercase_: Optional[int] = TFLEDModel(config=__SCREAMING_SNAKE_CASE).get_decoder()
        lowercase_: List[Any] = inputs_dict['''input_ids''']
        # Restrict to a single batch element.
        lowercase_: List[str] = input_ids[:1, :]
        lowercase_: Optional[int] = inputs_dict['''attention_mask'''][:1, :]
        lowercase_: Dict = 1
        # first forward pass
        lowercase_: Tuple = model(__SCREAMING_SNAKE_CASE, attention_mask=__SCREAMING_SNAKE_CASE, use_cache=__SCREAMING_SNAKE_CASE)
        lowercase_: Union[str, Any] = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        lowercase_: Optional[Any] = ids_tensor((self.batch_size, 3), config.vocab_size)
        lowercase_: Any = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.inta)
        # append to next input_ids and
        lowercase_: Optional[Any] = tf.concat([input_ids, next_tokens], axis=-1)
        lowercase_: List[str] = tf.concat([attention_mask, next_attn_mask], axis=-1)
        lowercase_: Tuple = model(__SCREAMING_SNAKE_CASE, attention_mask=__SCREAMING_SNAKE_CASE)[0]
        lowercase_: Optional[Any] = model(__SCREAMING_SNAKE_CASE, attention_mask=__SCREAMING_SNAKE_CASE, past_key_values=__SCREAMING_SNAKE_CASE)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        lowercase_: Tuple = int(ids_tensor((1,), output_from_past.shape[-1]))
        lowercase_: List[str] = output_from_no_past[:, -3:, random_slice_idx]
        lowercase_: Dict = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, rtol=1E-3)


def snake_case_(
    __SCREAMING_SNAKE_CASE: Any,
    __SCREAMING_SNAKE_CASE: int,
    __SCREAMING_SNAKE_CASE: Optional[int],
    __SCREAMING_SNAKE_CASE: Tuple=None,
    __SCREAMING_SNAKE_CASE: Optional[int]=None,
    __SCREAMING_SNAKE_CASE: List[str]=None,
    __SCREAMING_SNAKE_CASE: Any=None,
):
    """Fill in default attention/head masks for LED inputs (presumably
    `prepare_led_inputs_dict` — the call sites above use that name)."""
    if attention_mask is None:
        # Mask out padding positions in the encoder input.
        lowercase_: int = tf.cast(tf.math.not_equal(__SCREAMING_SNAKE_CASE, config.pad_token_id), tf.inta)
    if decoder_attention_mask is None:
        # Always attend to the first decoder token; mask padding elsewhere.
        lowercase_: Any = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.inta),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.inta),
            ],
            axis=-1,
        )
    if head_mask is None:
        lowercase_: Any = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        lowercase_: Dict = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }


@require_tf
class lowerCAmelCase__(lowerCamelCase_, lowerCamelCase_, unittest.TestCase):
    """Common-test suite for the TF LED models.

    NOTE(review): the two base classes are both the undefined name
    ``lowerCamelCase_`` (presumably TFModelTesterMixin and
    PipelineTesterMixin), and this class shadows the tester class above.
    """

    # NOTE(review): all attributes named "lowerCAmelCase_" — presumably
    # all_model_classes / all_generative_model_classes / pipeline_model_mapping
    # / is_encoder_decoder / test_pruning / test_head_masking /
    # test_onnx; only the final False survives.
    lowerCAmelCase_ = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    lowerCAmelCase_ = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    lowerCAmelCase_ = (
        {
            '''conversational''': TFLEDForConditionalGeneration,
            '''feature-extraction''': TFLEDModel,
            '''summarization''': TFLEDForConditionalGeneration,
            '''text2text-generation''': TFLEDForConditionalGeneration,
            '''translation''': TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    lowerCAmelCase_ = True
    lowerCAmelCase_ = False
    lowerCAmelCase_ = False
    lowerCAmelCase_ = False

    def _snake_case(self):
        """Set up the model tester and config tester.

        NOTE(review): `TFLEDModelTester` is undefined here — the tester class
        was renamed to `lowerCAmelCase__` by the obfuscator.
        """
        lowercase_: List[Any] = TFLEDModelTester(self)
        lowercase_: Optional[int] = ConfigTester(self, config_class=__SCREAMING_SNAKE_CASE)

    def _snake_case(self):
        """Run the shared LEDConfig sanity tests."""
        self.config_tester.run_common_tests()

    def _snake_case(self):
        """Exercise decoding with cached past key values."""
        lowercase_: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*__SCREAMING_SNAKE_CASE)

    def _snake_case(self):
        """Check the shapes/counts of local and global attention outputs under
        the various output_attentions/output_hidden_states combinations."""
        lowercase_: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase_: Optional[int] = tf.zeros_like(inputs_dict['''attention_mask'''])
        lowercase_: Tuple = 2
        # Mark the first `num_global_attn_indices` positions as global attention.
        lowercase_: Optional[int] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict['''global_attention_mask'''],
        )
        lowercase_: Dict = True
        lowercase_: Any = self.model_tester.seq_length
        lowercase_: List[str] = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(__SCREAMING_SNAKE_CASE):
            # Decoder attention: one tensor per layer, square over seq_length.
            lowercase_: Optional[int] = outputs.decoder_attentions
            self.assertEqual(len(__SCREAMING_SNAKE_CASE), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(__SCREAMING_SNAKE_CASE):
            # Encoder exposes both local and global attention tensors.
            lowercase_: Optional[int] = [t.numpy() for t in outputs.encoder_attentions]
            lowercase_: Union[str, Any] = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(__SCREAMING_SNAKE_CASE), self.model_tester.num_hidden_layers)
            self.assertEqual(len(__SCREAMING_SNAKE_CASE), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            lowercase_: str = True
            lowercase_: Tuple = False
            lowercase_: Dict = False
            lowercase_: Optional[int] = model_class(__SCREAMING_SNAKE_CASE)
            lowercase_: Optional[Any] = model(self._prepare_for_class(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE))
            lowercase_: Dict = len(__SCREAMING_SNAKE_CASE)
            self.assertEqual(config.output_hidden_states, __SCREAMING_SNAKE_CASE)
            check_encoder_attentions_output(__SCREAMING_SNAKE_CASE)
            if self.is_encoder_decoder:
                lowercase_: Optional[Any] = model_class(__SCREAMING_SNAKE_CASE)
                lowercase_: Dict = model(self._prepare_for_class(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE))
                self.assertEqual(config.output_hidden_states, __SCREAMING_SNAKE_CASE)
                check_decoder_attentions_output(__SCREAMING_SNAKE_CASE)
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            lowercase_: Tuple = True
            lowercase_: str = model_class(__SCREAMING_SNAKE_CASE)
            lowercase_: Optional[int] = model(self._prepare_for_class(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE))
            self.assertEqual(config.output_hidden_states, __SCREAMING_SNAKE_CASE)
            check_encoder_attentions_output(__SCREAMING_SNAKE_CASE)
            # Check attention is always last and order is fine
            lowercase_: str = True
            lowercase_: str = True
            lowercase_: str = model_class(__SCREAMING_SNAKE_CASE)
            lowercase_: Union[str, Any] = model(self._prepare_for_class(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(__SCREAMING_SNAKE_CASE))
            self.assertEqual(model.config.output_hidden_states, __SCREAMING_SNAKE_CASE)
            check_encoder_attentions_output(__SCREAMING_SNAKE_CASE)

    @unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''')
    def _snake_case(self):
        """Skipped: tracing is broken for LED (see skip reason)."""
        pass

    def _snake_case(self):
        """Intentionally empty (presumably overrides a flaky common test)."""
        pass


def snake_case_(__SCREAMING_SNAKE_CASE: Tuple):
    """Convert a nested Python list to an int tensor (presumably
    `_long_tensor` — the integration tests below call it by that name).

    NOTE(review): `tf.intaa` is the obfuscated form of a concrete int dtype
    (presumably tf.int32) — TODO confirm.
    """
    return tf.constant(__SCREAMING_SNAKE_CASE, dtype=tf.intaa)


# Numeric tolerance for the slow integration checks (name obfuscated;
# presumably TOLERANCE).
_lowercase: Optional[int] = 1E-4


@slow
@require_tf
class lowerCAmelCase__(unittest.TestCase):
    """Slow integration tests against the pretrained allenai/led-base-16384
    checkpoint; shadows the common-test class above (same obfuscated name)."""

    def _snake_case(self):
        """Forward pass of the bare LED encoder-decoder: check hidden-state
        shape and the first 3x3 slice of values."""
        lowercase_: List[Any] = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''').led
        # change to intended input here
        lowercase_: Any = _long_tensor([5_12 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]])
        lowercase_: Tuple = _long_tensor([1_28 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]])
        lowercase_: int = prepare_led_inputs_dict(model.config, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE)
        lowercase_: Tuple = model(**__SCREAMING_SNAKE_CASE)[0]
        lowercase_: Optional[int] = (1, 10_24, 7_68)
        self.assertEqual(output.shape, __SCREAMING_SNAKE_CASE)
        # change to expected output here
        lowercase_: Optional[int] = tf.convert_to_tensor(
            [[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], __SCREAMING_SNAKE_CASE, atol=1E-3)

    def _snake_case(self):
        """Forward pass with the LM head: check logits shape and the first
        3x3 slice of values."""
        lowercase_: Any = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''')
        # change to intended input here
        lowercase_: Dict = _long_tensor([5_12 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]])
        lowercase_: Tuple = _long_tensor([1_28 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]])
        lowercase_: str = prepare_led_inputs_dict(model.config, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE)
        lowercase_: str = model(**__SCREAMING_SNAKE_CASE)[0]
        lowercase_: Tuple = (1, 10_24, model.config.vocab_size)
        self.assertEqual(output.shape, __SCREAMING_SNAKE_CASE)
        # change to expected output here
        lowercase_: Optional[Any] = tf.convert_to_tensor(
            [[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], __SCREAMING_SNAKE_CASE, atol=1E-3, rtol=1E-3)
350
'''Obfuscated test suite for diffusers' DDPMScheduler.

NOTE(review): machine-obfuscated — the base class ``lowerCamelCase_`` is an
undefined name (presumably SchedulerCommonTest, which is imported above),
locals are bound as ``lowercase_`` while reads use the original names, and
parameters named ``__SCREAMING_SNAKE_CASE`` are read before any binding.
Code preserved byte-for-byte; comments only annotate.
'''
import torch

from diffusers import DDPMScheduler

from .test_schedulers import SchedulerCommonTest


class lowerCAmelCase__(lowerCamelCase_):
    # Presumably `scheduler_classes` — the single scheduler under test.
    lowerCAmelCase_ = (DDPMScheduler,)

    def _snake_case(self, **__SCREAMING_SNAKE_CASE):
        """Return a default scheduler config, overridden by any kwargs."""
        lowercase_: Optional[Any] = {
            '''num_train_timesteps''': 10_00,
            '''beta_start''': 0.0_001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''variance_type''': '''fixed_small''',
            '''clip_sample''': True,
        }
        config.update(**__SCREAMING_SNAKE_CASE)
        return config

    def _snake_case(self):
        """Sweep num_train_timesteps values through the common config check."""
        for timesteps in [1, 5, 1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE)

    def _snake_case(self):
        """Sweep paired beta_start/beta_end values."""
        for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=__SCREAMING_SNAKE_CASE, beta_end=__SCREAMING_SNAKE_CASE)

    def _snake_case(self):
        """Sweep the supported beta schedules."""
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=__SCREAMING_SNAKE_CASE)

    def _snake_case(self):
        """Sweep the supported variance types."""
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=__SCREAMING_SNAKE_CASE)

    def _snake_case(self):
        """Check both clip_sample settings."""
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=__SCREAMING_SNAKE_CASE)

    def _snake_case(self):
        """Check thresholding off, then on across thresholds/prediction types."""
        self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=__SCREAMING_SNAKE_CASE,
                    prediction_type=__SCREAMING_SNAKE_CASE,
                    sample_max_value=__SCREAMING_SNAKE_CASE,
                )

    def _snake_case(self):
        """Sweep the supported prediction types."""
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE)

    def _snake_case(self):
        """Run the forward check at early/middle/final timesteps."""
        for t in [0, 5_00, 9_99]:
            self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE)

    def _snake_case(self):
        """Pin the internal variance values at t=0, t=487 and t=999."""
        lowercase_: List[str] = self.scheduler_classes[0]
        lowercase_: int = self.get_scheduler_config()
        lowercase_: str = scheduler_class(**__SCREAMING_SNAKE_CASE)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_87) - 0.00_979)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_99) - 0.02)) < 1E-5

    def _snake_case(self):
        """Run a full reverse-diffusion loop with a deterministic dummy model
        and pin the sum/mean of the final sample (epsilon prediction)."""
        lowercase_: Union[str, Any] = self.scheduler_classes[0]
        lowercase_: Any = self.get_scheduler_config()
        lowercase_: Optional[int] = scheduler_class(**__SCREAMING_SNAKE_CASE)
        lowercase_: Any = len(__SCREAMING_SNAKE_CASE)
        lowercase_: Optional[int] = self.dummy_model()
        lowercase_: Any = self.dummy_sample_deter
        lowercase_: List[str] = torch.manual_seed(0)
        for t in reversed(range(__SCREAMING_SNAKE_CASE)):
            # 1. predict noise residual
            lowercase_: Optional[int] = model(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE)
            # 2. predict previous mean of sample x_t-1
            lowercase_: Union[str, Any] = scheduler.step(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, generator=__SCREAMING_SNAKE_CASE).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            lowercase_: List[str] = pred_prev_sample
        lowercase_: str = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE))
        lowercase_: Tuple = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE))
        assert abs(result_sum.item() - 258.9_606) < 1E-2
        assert abs(result_mean.item() - 0.3_372) < 1E-3

    def _snake_case(self):
        """Same full-loop check but with prediction_type="v_prediction"."""
        lowercase_: Optional[Any] = self.scheduler_classes[0]
        lowercase_: Tuple = self.get_scheduler_config(prediction_type='''v_prediction''')
        lowercase_: Any = scheduler_class(**__SCREAMING_SNAKE_CASE)
        lowercase_: Optional[int] = len(__SCREAMING_SNAKE_CASE)
        lowercase_: List[str] = self.dummy_model()
        lowercase_: int = self.dummy_sample_deter
        lowercase_: str = torch.manual_seed(0)
        for t in reversed(range(__SCREAMING_SNAKE_CASE)):
            # 1. predict noise residual
            lowercase_: Optional[Any] = model(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE)
            # 2. predict previous mean of sample x_t-1
            lowercase_: Optional[Any] = scheduler.step(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, generator=__SCREAMING_SNAKE_CASE).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            lowercase_: int = pred_prev_sample
        lowercase_: str = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE))
        lowercase_: Tuple = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE))
        assert abs(result_sum.item() - 202.0_296) < 1E-2
        assert abs(result_mean.item() - 0.2_631) < 1E-3

    def _snake_case(self):
        """Check previous_timestep() walks a custom timestep schedule, with -1
        after the final entry."""
        lowercase_: Optional[int] = self.scheduler_classes[0]
        lowercase_: int = self.get_scheduler_config()
        lowercase_: int = scheduler_class(**__SCREAMING_SNAKE_CASE)
        lowercase_: int = [1_00, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE)
        lowercase_: List[str] = scheduler.timesteps
        for i, timestep in enumerate(__SCREAMING_SNAKE_CASE):
            if i == len(__SCREAMING_SNAKE_CASE) - 1:
                lowercase_: str = -1
            else:
                lowercase_: Any = timesteps[i + 1]
            lowercase_: Optional[Any] = scheduler.previous_timestep(__SCREAMING_SNAKE_CASE)
            lowercase_: int = prev_t.item()
            self.assertEqual(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE)

    def _snake_case(self):
        """Non-descending custom timesteps must raise."""
        lowercase_: Tuple = self.scheduler_classes[0]
        lowercase_: List[Any] = self.get_scheduler_config()
        lowercase_: str = scheduler_class(**__SCREAMING_SNAKE_CASE)
        lowercase_: Optional[int] = [1_00, 87, 50, 51, 0]
        with self.assertRaises(__SCREAMING_SNAKE_CASE, msg='''`custom_timesteps` must be in descending order.'''):
            scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE)

    def _snake_case(self):
        """Passing both num_inference_steps and custom timesteps must raise."""
        lowercase_: Optional[Any] = self.scheduler_classes[0]
        lowercase_: str = self.get_scheduler_config()
        lowercase_: Tuple = scheduler_class(**__SCREAMING_SNAKE_CASE)
        lowercase_: Tuple = [1_00, 87, 50, 1, 0]
        lowercase_: List[str] = len(__SCREAMING_SNAKE_CASE)
        with self.assertRaises(__SCREAMING_SNAKE_CASE, msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.'''):
            scheduler.set_timesteps(num_inference_steps=__SCREAMING_SNAKE_CASE, timesteps=__SCREAMING_SNAKE_CASE)

    def _snake_case(self):
        """Timesteps at/after num_train_timesteps must raise.

        NOTE(review): the `msg=` string below is not an f-string, so the
        `{scheduler.config.num_train_timesteps}}` placeholder (with a stray
        extra brace) is emitted literally — looks like a typo; left unchanged
        because a doc-only edit may not alter runtime strings.
        """
        lowercase_: List[Any] = self.scheduler_classes[0]
        lowercase_: Optional[int] = self.get_scheduler_config()
        lowercase_: Any = scheduler_class(**__SCREAMING_SNAKE_CASE)
        lowercase_: str = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            __SCREAMING_SNAKE_CASE,
            msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''',
        ):
            scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE)
264
0
import os

import pytest

from datasets import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
)


# NOTE(review): machine-obfuscated module. Every test function below is named
# "a__" (each definition shadows the previous, so pytest would only collect
# the last one), and each signature repeats the parameter name "snake_case"
# (duplicate argument names are a SyntaxError). Bodies also read names
# (`path`, `script_name`, …) that were obfuscated out of the signatures.
# Code preserved byte-for-byte; comments only annotate.

# Presumably `pytest.mark.integration` bound to a marker alias (obfuscated name).
lowercase_ = pytest.mark.integration


@pytest.mark.parametrize('''path''', ['''paws''', '''csv'''])
def a__(snake_case, snake_case):
    """inspect_dataset should copy the loading script (but not caches) into
    the target directory."""
    inspect_dataset(snake_case, snake_case)
    __SCREAMING_SNAKE_CASE: Optional[int] = path + '''.py'''
    assert script_name in os.listdir(snake_case)
    assert "__pycache__" not in os.listdir(snake_case)


@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''')
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''')
@pytest.mark.parametrize('''path''', ['''accuracy'''])
def a__(snake_case, snake_case):
    """Same check for the (deprecated) inspect_metric helper."""
    inspect_metric(snake_case, snake_case)
    __SCREAMING_SNAKE_CASE: Optional[Any] = path + '''.py'''
    assert script_name in os.listdir(snake_case)
    assert "__pycache__" not in os.listdir(snake_case)


@pytest.mark.parametrize(
    '''path, config_name, expected_splits''',
    [
        ('''squad''', '''plain_text''', ['''train''', '''validation''']),
        ('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
        ('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
    ],
)
def a__(snake_case, snake_case, snake_case):
    """get_dataset_config_info should report the config name and its splits."""
    __SCREAMING_SNAKE_CASE: Dict = get_dataset_config_info(snake_case, config_name=snake_case)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    '''path, config_name, expected_exception''',
    [
        ('''paws''', None, ValueError),
    ],
)
def a__(snake_case, snake_case, snake_case):
    """A multi-config dataset without an explicit config_name should raise."""
    with pytest.raises(snake_case):
        get_dataset_config_info(snake_case, config_name=snake_case)


@pytest.mark.parametrize(
    '''path, expected''',
    [
        ('''squad''', '''plain_text'''),
        ('''acronym_identification''', '''default'''),
        ('''lhoestq/squad''', '''plain_text'''),
        ('''lhoestq/test''', '''default'''),
        ('''lhoestq/demo1''', '''lhoestq--demo1'''),
        ('''dalle-mini/wit''', '''dalle-mini--wit'''),
    ],
)
def a__(snake_case, snake_case):
    """get_dataset_config_names should include the expected config."""
    __SCREAMING_SNAKE_CASE: Optional[int] = get_dataset_config_names(snake_case)
    assert expected in config_names


@pytest.mark.parametrize(
    '''path, expected_configs, expected_splits_in_first_config''',
    [
        ('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
        ('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
        ('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
    ],
)
def a__(snake_case, snake_case, snake_case):
    """get_dataset_infos should list every config and the first config's splits."""
    __SCREAMING_SNAKE_CASE: Optional[Any] = get_dataset_infos(snake_case)
    assert list(infos.keys()) == expected_configs
    __SCREAMING_SNAKE_CASE: Dict = expected_configs[0]
    assert expected_config in infos
    __SCREAMING_SNAKE_CASE: str = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    '''path, expected_config, expected_splits''',
    [
        ('''squad''', '''plain_text''', ['''train''', '''validation''']),
        ('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
        ('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
    ],
)
def a__(snake_case, snake_case, snake_case):
    """get_dataset_infos should expose the expected config's split names."""
    __SCREAMING_SNAKE_CASE: Optional[int] = get_dataset_infos(snake_case)
    assert expected_config in infos
    __SCREAMING_SNAKE_CASE: Optional[int] = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    '''path, config_name, expected_exception''',
    [
        ('''paws''', None, ValueError),
    ],
)
def a__(snake_case, snake_case, snake_case):
    """get_dataset_split_names should raise without an explicit config_name."""
    with pytest.raises(snake_case):
        get_dataset_split_names(snake_case, config_name=snake_case)
303
from ...configuration_utils import PretrainedConfig
from ...utils import logging


# NOTE(review): machine-obfuscated module — the logger and the pretrained
# config-archive map below both bind the same name "lowercase_" (the second
# shadows the first), and every __init__ parameter is named "_A" (duplicate
# argument names are a SyntaxError). Code preserved byte-for-byte.
lowercase_ = logging.get_logger(__name__)

lowercase_ = {
    """uw-madison/mra-base-512-4""": """https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json""",
}


class __UpperCamelCase(lowerCAmelCase__):
    """Configuration class for the MRA model (model_type "mra").

    NOTE(review): the base class is the undefined name ``lowerCAmelCase__``
    (presumably PretrainedConfig, imported above) — obfuscation artifact.
    """

    # Presumably `model_type` — the attribute the obfuscator renamed.
    lowerCAmelCase_ = '''mra'''

    def __init__(
        self: str,
        _A: List[str] = 5_0265,
        _A: int = 768,
        _A: Union[str, Any] = 12,
        _A: Union[str, Any] = 12,
        _A: Union[str, Any] = 3072,
        _A: Any = "gelu",
        _A: List[Any] = 0.1,
        _A: List[Any] = 0.1,
        _A: List[str] = 512,
        _A: Tuple = 1,
        _A: List[str] = 0.02,
        _A: Union[str, Any] = 1e-5,
        _A: Optional[int] = "absolute",
        _A: Union[str, Any] = 4,
        _A: List[Any] = "full",
        _A: Union[str, Any] = 0,
        _A: Union[str, Any] = 0,
        _A: Optional[Any] = 1,
        _A: Union[str, Any] = 0,
        _A: Any = 2,
        **_A: List[str],
    ):
        """Store the MRA hyper-parameters.

        NOTE(review): the assignments below bind the local
        "__SCREAMING_SNAKE_CASE" from names (vocab_size, hidden_size, …) that
        the obfuscator removed from the parameter list; in the original each
        was `self.<name> = <name>`.
        """
        super().__init__(pad_token_id=_A, bos_token_id=_A, eos_token_id=_A, **_A)
        __SCREAMING_SNAKE_CASE: Dict = vocab_size
        __SCREAMING_SNAKE_CASE: str = max_position_embeddings
        __SCREAMING_SNAKE_CASE: Optional[int] = hidden_size
        __SCREAMING_SNAKE_CASE: str = num_hidden_layers
        __SCREAMING_SNAKE_CASE: Tuple = num_attention_heads
        __SCREAMING_SNAKE_CASE: str = intermediate_size
        __SCREAMING_SNAKE_CASE: Tuple = hidden_act
        __SCREAMING_SNAKE_CASE: Tuple = hidden_dropout_prob
        __SCREAMING_SNAKE_CASE: int = attention_probs_dropout_prob
        __SCREAMING_SNAKE_CASE: Tuple = initializer_range
        __SCREAMING_SNAKE_CASE: Any = type_vocab_size
        __SCREAMING_SNAKE_CASE: str = layer_norm_eps
        __SCREAMING_SNAKE_CASE: Union[str, Any] = position_embedding_type
        __SCREAMING_SNAKE_CASE: str = block_per_row
        __SCREAMING_SNAKE_CASE: Union[str, Any] = approx_mode
        __SCREAMING_SNAKE_CASE: Optional[int] = initial_prior_first_n_blocks
        __SCREAMING_SNAKE_CASE: List[Any] = initial_prior_diagonal_n_blocks
303
1
import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer from ...utils import logging _lowerCAmelCase :int = logging.get_logger(__name__) _lowerCAmelCase :Dict = '▁' _lowerCAmelCase :Optional[Any] = { 'vocab_file': 'vocab.json', 'spm_file': 'sentencepiece.bpe.model', 'tokenizer_config_file': 'tokenizer_config.json', } _lowerCAmelCase :Optional[int] = { 'vocab_file': { 'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json', 'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json', }, 'spm_file': { 'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model', 'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model', }, 'tokenizer_config_file': { 'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json', 'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json', }, } _lowerCAmelCase :Tuple = { 'facebook/m2m100_418M': 1_024, } # fmt: off _lowerCAmelCase :Optional[Any] = { 'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'], 'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de'] } class 
_UpperCAmelCase ( a ): '''simple docstring''' a__ =VOCAB_FILES_NAMES a__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ =PRETRAINED_VOCAB_FILES_MAP a__ =['''input_ids''', '''attention_mask'''] a__ =[] a__ =[] def __init__( self , A , A , A=None , A=None , A="<s>" , A="</s>" , A="</s>" , A="<pad>" , A="<unk>" , A="m2m100" , A = None , A=8 , **A , ) -> None: _UpperCAmelCase : Dict = {} if sp_model_kwargs is None else sp_model_kwargs _UpperCAmelCase : Dict = language_codes _UpperCAmelCase : str = FAIRSEQ_LANGUAGE_CODES[language_codes] _UpperCAmelCase : int = {lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code} _UpperCAmelCase : str = kwargs.get('''additional_special_tokens''' , [] ) kwargs["additional_special_tokens"] += [ self.get_lang_token(A ) for lang_code in fairseq_language_code if self.get_lang_token(A ) not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=A , tgt_lang=A , bos_token=A , eos_token=A , sep_token=A , unk_token=A , pad_token=A , language_codes=A , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=A , **A , ) _UpperCAmelCase : List[str] = vocab_file _UpperCAmelCase : List[str] = load_json(A ) _UpperCAmelCase : Dict = {v: k for k, v in self.encoder.items()} _UpperCAmelCase : Union[str, Any] = spm_file _UpperCAmelCase : int = load_spm(A , self.sp_model_kwargs ) _UpperCAmelCase : Union[str, Any] = len(self.encoder ) _UpperCAmelCase : List[str] = { self.get_lang_token(A ): self.encoder_size + i for i, lang_code in enumerate(A ) } _UpperCAmelCase : Union[str, Any] = {lang_code: self.encoder_size + i for i, lang_code in enumerate(A )} _UpperCAmelCase : Any = {v: k for k, v in self.lang_token_to_id.items()} _UpperCAmelCase : int = src_lang if src_lang is not None else '''en''' _UpperCAmelCase : Any = tgt_lang _UpperCAmelCase : List[Any] = self.get_lang_id(self._src_lang ) self.set_src_lang_special_tokens(self._src_lang ) _UpperCAmelCase : List[Any] = num_madeup_words @property def __lowerCAmelCase ( self ) -> int: 
return len(self.encoder ) + len(self.lang_token_to_id ) @property def __lowerCAmelCase ( self ) -> str: return self._src_lang @src_lang.setter def __lowerCAmelCase ( self , A ) -> None: _UpperCAmelCase : Optional[int] = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __lowerCAmelCase ( self , A ) -> List[str]: return self.sp_model.encode(A , out_type=A ) def __lowerCAmelCase ( self , A ) -> List[str]: if token in self.lang_token_to_id: return self.lang_token_to_id[token] return self.encoder.get(A , self.encoder[self.unk_token] ) def __lowerCAmelCase ( self , A ) -> str: if index in self.id_to_lang_token: return self.id_to_lang_token[index] return self.decoder.get(A , self.unk_token ) def __lowerCAmelCase ( self , A ) -> List[Any]: _UpperCAmelCase : Optional[int] = [] _UpperCAmelCase : Union[str, Any] = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(A ) + token _UpperCAmelCase : Optional[Any] = [] else: current_sub_tokens.append(A ) out_string += self.sp_model.decode(A ) return out_string.strip() def __lowerCAmelCase ( self , A , A = None , A = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A , token_ids_a=A , already_has_special_tokens=A ) _UpperCAmelCase : int = [1] * len(self.prefix_tokens ) _UpperCAmelCase : Any = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(A )) + suffix_ones return prefix_ones + ([0] * len(A )) + ([0] * len(A )) + suffix_ones def __lowerCAmelCase ( self , A , A = None ) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def __lowerCAmelCase ( self ) -> Dict: _UpperCAmelCase : List[Any] = 
{self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> Dict: _UpperCAmelCase : Union[str, Any] = self.__dict__.copy() _UpperCAmelCase : List[Any] = None return state def __setstate__( self , A ) -> None: _UpperCAmelCase : Any = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): _UpperCAmelCase : int = {} _UpperCAmelCase : Tuple = load_spm(self.spm_file , self.sp_model_kwargs ) def __lowerCAmelCase ( self , A , A = None ) -> Tuple[str]: _UpperCAmelCase : Any = Path(A ) if not save_dir.is_dir(): raise OSError(f'{save_directory} should be a directory' ) _UpperCAmelCase : Any = save_dir / ( (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file'''] ) _UpperCAmelCase : List[Any] = save_dir / ( (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file'''] ) save_json(self.encoder , A ) if os.path.abspath(self.spm_file ) != os.path.abspath(A ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , A ) elif not os.path.isfile(self.spm_file ): with open(A , '''wb''' ) as fi: _UpperCAmelCase : Dict = self.sp_model.serialized_model_proto() fi.write(A ) return (str(A ), str(A )) def __lowerCAmelCase ( self , A , A = "en" , A = None , A = "ro" , **A , ) -> BatchEncoding: _UpperCAmelCase : List[Any] = src_lang _UpperCAmelCase : Dict = tgt_lang self.set_src_lang_special_tokens(self.src_lang ) return super().prepare_seqaseq_batch(A , A , **A ) def __lowerCAmelCase ( self , A , A , A , **A ) -> Any: if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) _UpperCAmelCase : str = src_lang _UpperCAmelCase : Any = self(A , add_special_tokens=A , **A ) _UpperCAmelCase : Union[str, Any] = self.get_lang_id(A ) _UpperCAmelCase : List[Any] = tgt_lang_id return inputs def __lowerCAmelCase ( self ) -> List[str]: 
self.set_src_lang_special_tokens(self.src_lang ) def __lowerCAmelCase ( self ) -> List[str]: self.set_tgt_lang_special_tokens(self.tgt_lang ) def __lowerCAmelCase ( self , A ) -> None: _UpperCAmelCase : List[str] = self.get_lang_token(A ) _UpperCAmelCase : Union[str, Any] = self.lang_token_to_id[lang_token] _UpperCAmelCase : Any = [self.cur_lang_id] _UpperCAmelCase : Optional[int] = [self.eos_token_id] def __lowerCAmelCase ( self , A ) -> None: _UpperCAmelCase : Optional[Any] = self.get_lang_token(A ) _UpperCAmelCase : List[Any] = self.lang_token_to_id[lang_token] _UpperCAmelCase : Optional[Any] = [self.cur_lang_id] _UpperCAmelCase : Union[str, Any] = [self.eos_token_id] def __lowerCAmelCase ( self , A ) -> str: return self.lang_code_to_token[lang] def __lowerCAmelCase ( self , A ) -> int: _UpperCAmelCase : List[Any] = self.get_lang_token(A ) return self.lang_token_to_id[lang_token] def lowerCamelCase_ (UpperCamelCase__ : str , UpperCamelCase__ : Dict[str, Any] ): _UpperCAmelCase : Any = sentencepiece.SentencePieceProcessor(**UpperCamelCase__ ) spm.Load(str(UpperCamelCase__ ) ) return spm def lowerCamelCase_ (UpperCamelCase__ : str ): with open(UpperCamelCase__ , '''r''' ) as f: return json.load(UpperCamelCase__ ) def lowerCamelCase_ (UpperCamelCase__ : Any , UpperCamelCase__ : str ): with open(UpperCamelCase__ , '''w''' ) as f: json.dump(UpperCamelCase__ , UpperCamelCase__ , indent=2 )
364
"""simple docstring""" import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class _UpperCAmelCase ( a ): '''simple docstring''' def __init__( self , A , A=1_3 , A=7 , A=True , A=True , A=False , A=True , A=9_9 , A=3_2 , A=5 , A=4 , A=3_7 , A="gelu" , A=0.1 , A=0.1 , A=5_1_2 , A=1_6 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> Dict: _UpperCAmelCase : Dict = parent _UpperCAmelCase : int = batch_size _UpperCAmelCase : Union[str, Any] = seq_length _UpperCAmelCase : str = is_training _UpperCAmelCase : Optional[Any] = use_input_mask _UpperCAmelCase : List[Any] = use_token_type_ids _UpperCAmelCase : str = use_labels _UpperCAmelCase : List[str] = vocab_size _UpperCAmelCase : str = hidden_size _UpperCAmelCase : List[str] = num_hidden_layers _UpperCAmelCase : List[str] = num_attention_heads _UpperCAmelCase : Union[str, Any] = intermediate_size _UpperCAmelCase : str = hidden_act _UpperCAmelCase : Optional[int] = hidden_dropout_prob _UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob _UpperCAmelCase : str = max_position_embeddings _UpperCAmelCase : Optional[int] = type_vocab_size _UpperCAmelCase : List[str] = type_sequence_label_size _UpperCAmelCase : Optional[Any] = initializer_range _UpperCAmelCase : Optional[int] = num_labels _UpperCAmelCase : int = num_choices _UpperCAmelCase : Dict = scope def __lowerCAmelCase ( self ) -> int: _UpperCAmelCase : 
List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase : int = None if self.use_input_mask: _UpperCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase : int = None _UpperCAmelCase : Optional[int] = None _UpperCAmelCase : Optional[Any] = None if self.use_labels: _UpperCAmelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase : List[str] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCAmelCase ( self ) -> Union[str, Any]: return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def __lowerCAmelCase ( self , A , A , A , A , A , A ) -> Tuple: _UpperCAmelCase : int = DistilBertModel(config=A ) model.to(A ) model.eval() _UpperCAmelCase : Optional[int] = model(A , A ) _UpperCAmelCase : List[str] = model(A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self , A , A , A , A , A , A ) -> Any: _UpperCAmelCase : int = DistilBertForMaskedLM(config=A ) model.to(A ) model.eval() _UpperCAmelCase : Any = model(A , attention_mask=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self , A , A , A , A , A , A ) -> List[Any]: _UpperCAmelCase : Tuple = DistilBertForQuestionAnswering(config=A ) model.to(A ) 
model.eval() _UpperCAmelCase : Optional[int] = model( A , attention_mask=A , start_positions=A , end_positions=A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCAmelCase ( self , A , A , A , A , A , A ) -> str: _UpperCAmelCase : List[Any] = self.num_labels _UpperCAmelCase : Union[str, Any] = DistilBertForSequenceClassification(A ) model.to(A ) model.eval() _UpperCAmelCase : int = model(A , attention_mask=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCAmelCase ( self , A , A , A , A , A , A ) -> List[Any]: _UpperCAmelCase : Any = self.num_labels _UpperCAmelCase : Optional[int] = DistilBertForTokenClassification(config=A ) model.to(A ) model.eval() _UpperCAmelCase : Optional[Any] = model(A , attention_mask=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCAmelCase ( self , A , A , A , A , A , A ) -> List[Any]: _UpperCAmelCase : str = self.num_choices _UpperCAmelCase : Dict = DistilBertForMultipleChoice(config=A ) model.to(A ) model.eval() _UpperCAmelCase : Tuple = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase : Optional[Any] = model( A , attention_mask=A , labels=A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowerCAmelCase ( self ) -> Optional[Any]: _UpperCAmelCase : Dict = self.prepare_config_and_inputs() ((_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase)) : int = config_and_inputs _UpperCAmelCase : Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class 
_UpperCAmelCase ( a ,a ,unittest.TestCase ): '''simple docstring''' a__ =( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) a__ =( { '''feature-extraction''': DistilBertModel, '''fill-mask''': DistilBertForMaskedLM, '''question-answering''': DistilBertForQuestionAnswering, '''text-classification''': DistilBertForSequenceClassification, '''token-classification''': DistilBertForTokenClassification, '''zero-shot''': DistilBertForSequenceClassification, } if is_torch_available() else {} ) a__ =True a__ =True a__ =True a__ =True def __lowerCAmelCase ( self ) -> Optional[Any]: _UpperCAmelCase : Union[str, Any] = DistilBertModelTester(self ) _UpperCAmelCase : List[Any] = ConfigTester(self , config_class=A , dim=3_7 ) def __lowerCAmelCase ( self ) -> Tuple: self.config_tester.run_common_tests() def __lowerCAmelCase ( self ) -> Optional[Any]: _UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*A ) def __lowerCAmelCase ( self ) -> Tuple: _UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*A ) def __lowerCAmelCase ( self ) -> Dict: _UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*A ) def __lowerCAmelCase ( self ) -> Any: _UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*A ) def __lowerCAmelCase ( self ) -> Tuple: _UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*A ) def __lowerCAmelCase ( self ) -> Optional[int]: _UpperCAmelCase : Union[str, Any] = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*A ) @slow def __lowerCAmelCase ( self ) -> Optional[Any]: for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase : Optional[int] = DistilBertModel.from_pretrained(A ) self.assertIsNotNone(A ) @slow @require_torch_gpu def __lowerCAmelCase ( self ) -> Optional[Any]: _UpperCAmelCase , _UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. if model_class == DistilBertForMultipleChoice: return _UpperCAmelCase : Tuple = True _UpperCAmelCase : Union[str, Any] = model_class(config=A ) _UpperCAmelCase : List[Any] = self._prepare_for_class(A , A ) _UpperCAmelCase : int = torch.jit.trace( A , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(A , os.path.join(A , '''traced_model.pt''' ) ) _UpperCAmelCase : Optional[int] = torch.jit.load(os.path.join(A , '''traced_model.pt''' ) , map_location=A ) loaded(inputs_dict['''input_ids'''].to(A ) , inputs_dict['''attention_mask'''].to(A ) ) @require_torch class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def __lowerCAmelCase ( self ) -> List[str]: _UpperCAmelCase : Dict = DistilBertModel.from_pretrained('''distilbert-base-uncased''' ) _UpperCAmelCase : List[str] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) _UpperCAmelCase : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): _UpperCAmelCase : str = model(A , attention_mask=A )[0] _UpperCAmelCase : Dict = torch.Size((1, 1_1, 7_6_8) ) self.assertEqual(output.shape , A ) _UpperCAmelCase : Any = torch.tensor( [[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] ) 
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , A , atol=1E-4 ) )
68
0
from typing import Dict import numpy as np from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException if is_tf_available(): import tensorflow as tf from ..tf_utils import stable_softmax if is_torch_available(): import torch UpperCAmelCase__ = logging.get_logger(__name__) @add_end_docstrings( lowerCAmelCase , r''' top_k (`int`, defaults to 5): The number of predictions to return. targets (`str` or `List[str]`, *optional*): When passed, the model will limit the scores to the passed targets instead of looking up in the whole vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting token will be used (with a warning, and that might be slower). ''' , ) class lowerCamelCase__ ( lowerCAmelCase): def __A (self , UpperCAmelCase ) -> np.ndarray: if self.framework == "tf": _lowercase =tf.where(input_ids == self.tokenizer.mask_token_id ).numpy() elif self.framework == "pt": _lowercase =torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=UpperCAmelCase ) else: raise ValueError('''Unsupported framework''' ) return masked_index def __A (self , UpperCAmelCase ) -> np.ndarray: _lowercase =self.get_masked_index(UpperCAmelCase ) _lowercase =np.prod(masked_index.shape ) if numel < 1: raise PipelineException( '''fill-mask''' , self.model.base_model_prefix , f"No mask_token ({self.tokenizer.mask_token}) found on the input" , ) def __A (self , UpperCAmelCase ) -> List[Any]: if isinstance(UpperCAmelCase , UpperCAmelCase ): for model_input in model_inputs: self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] ) else: for input_ids in model_inputs["input_ids"]: self._ensure_exactly_one_mask_token(UpperCAmelCase ) def __A (self , UpperCAmelCase , UpperCAmelCase=None , **UpperCAmelCase ) -> Dict[str, GenericTensor]: if return_tensors is None: _lowercase =self.framework _lowercase =self.tokenizer(UpperCAmelCase 
, return_tensors=UpperCAmelCase ) self.ensure_exactly_one_mask_token(UpperCAmelCase ) return model_inputs def __A (self , UpperCAmelCase ) -> Union[str, Any]: _lowercase =self.model(**UpperCAmelCase ) _lowercase =model_inputs['''input_ids'''] return model_outputs def __A (self , UpperCAmelCase , UpperCAmelCase=5 , UpperCAmelCase=None ) -> Tuple: # Cap top_k if there are targets if target_ids is not None and target_ids.shape[0] < top_k: _lowercase =target_ids.shape[0] _lowercase =model_outputs['''input_ids'''][0] _lowercase =model_outputs['''logits'''] if self.framework == "tf": _lowercase =tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0] _lowercase =outputs.numpy() _lowercase =outputs[0, masked_index, :] _lowercase =stable_softmax(UpperCAmelCase , axis=-1 ) if target_ids is not None: _lowercase =tf.gather_nd(tf.squeeze(UpperCAmelCase , 0 ) , target_ids.reshape(-1 , 1 ) ) _lowercase =tf.expand_dims(UpperCAmelCase , 0 ) _lowercase =tf.math.top_k(UpperCAmelCase , k=UpperCAmelCase ) _lowercase , _lowercase =topk.values.numpy(), topk.indices.numpy() else: _lowercase =torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=UpperCAmelCase ).squeeze(-1 ) # Fill mask pipeline supports only one ${mask_token} per sample _lowercase =outputs[0, masked_index, :] _lowercase =logits.softmax(dim=-1 ) if target_ids is not None: _lowercase =probs[..., target_ids] _lowercase , _lowercase =probs.topk(UpperCAmelCase ) _lowercase =[] _lowercase =values.shape[0] == 1 for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ): _lowercase =[] for v, p in zip(_values , _predictions ): # Copy is important since we're going to modify this array in place _lowercase =input_ids.numpy().copy() if target_ids is not None: _lowercase =target_ids[p].tolist() _lowercase =p # Filter padding out: _lowercase =tokens[np.where(tokens != self.tokenizer.pad_token_id )] # Originally we skip special tokens to give readable output. 
# For multi masks though, the other [MASK] would be removed otherwise # making the output look odd, so we add them back _lowercase =self.tokenizer.decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase ) _lowercase ={'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence} row.append(UpperCAmelCase ) result.append(UpperCAmelCase ) if single_mask: return result[0] return result def __A (self , UpperCAmelCase , UpperCAmelCase=None ) -> List[str]: if isinstance(UpperCAmelCase , UpperCAmelCase ): _lowercase =[targets] try: _lowercase =self.tokenizer.get_vocab() except Exception: _lowercase ={} _lowercase =[] for target in targets: _lowercase =vocab.get(UpperCAmelCase , UpperCAmelCase ) if id_ is None: _lowercase =self.tokenizer( UpperCAmelCase , add_special_tokens=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , max_length=1 , truncation=UpperCAmelCase , )['''input_ids'''] if len(UpperCAmelCase ) == 0: logger.warning( f"The specified target token `{target}` does not exist in the model vocabulary. " '''We cannot replace it with anything meaningful, ignoring it''' ) continue _lowercase =input_ids[0] # XXX: If users encounter this pass # it becomes pretty slow, so let's make sure # The warning enables them to fix the input to # get faster performance. logger.warning( f"The specified target token `{target}` does not exist in the model vocabulary. " f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`." 
) target_ids.append(id_ ) _lowercase =list(set(UpperCAmelCase ) ) if len(UpperCAmelCase ) == 0: raise ValueError('''At least one target must be provided when passed.''' ) _lowercase =np.array(UpperCAmelCase ) return target_ids def __A (self , UpperCAmelCase=None , UpperCAmelCase=None ) -> List[str]: _lowercase ={} if targets is not None: _lowercase =self.get_target_ids(UpperCAmelCase , UpperCAmelCase ) _lowercase =target_ids if top_k is not None: _lowercase =top_k if self.tokenizer.mask_token_id is None: raise PipelineException( '''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' ) return {}, {}, postprocess_params def __call__(self , UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase ) -> Any: _lowercase =super().__call__(UpperCAmelCase , **UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) and len(UpperCAmelCase ) == 1: return outputs[0] return outputs
5
from typing import Any def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> list: """simple docstring""" _validation( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) # Creates data structures and fill initial step _lowercase ={} _lowercase ={} for state in states_space: _lowercase =observations_space[0] _lowercase =( initial_probabilities[state] * emission_probabilities[state][observation] ) _lowercase =None # Fills the data structure with the probabilities of # different transitions and pointers to previous states for o in range(1 , len(__snake_case ) ): _lowercase =observations_space[o] _lowercase =observations_space[o - 1] for state in states_space: # Calculates the argmax for probability function _lowercase ='''''' _lowercase =-1 for k_state in states_space: _lowercase =( probabilities[(k_state, prior_observation)] * transition_probabilities[k_state][state] * emission_probabilities[state][observation] ) if probability > max_probability: _lowercase =probability _lowercase =k_state # Update probabilities and pointers dicts _lowercase =( probabilities[(arg_max, prior_observation)] * transition_probabilities[arg_max][state] * emission_probabilities[state][observation] ) _lowercase =arg_max # The final observation _lowercase =observations_space[len(__snake_case ) - 1] # argmax for given final observation _lowercase ='''''' _lowercase =-1 for k_state in states_space: _lowercase =probabilities[(k_state, final_observation)] if probability > max_probability: _lowercase =probability _lowercase =k_state _lowercase =arg_max # Process pointers backwards _lowercase =last_state _lowercase =[] for o in range(len(__snake_case ) - 1 , -1 , -1 ): result.append(__snake_case ) _lowercase =pointers[previous, observations_space[o]] result.reverse() return result def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> None: """simple docstring""" 
_validate_not_empty( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) _validate_lists(__snake_case , __snake_case ) _validate_dicts( __snake_case , __snake_case , __snake_case ) def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> None: """simple docstring""" if not all( [ observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, ] ): raise ValueError('''There\'s an empty parameter''' ) def UpperCAmelCase_ ( __snake_case , __snake_case ) -> None: """simple docstring""" _validate_list(__snake_case , '''observations_space''' ) _validate_list(__snake_case , '''states_space''' ) def UpperCAmelCase_ ( __snake_case , __snake_case ) -> None: """simple docstring""" if not isinstance(_object , __snake_case ): _lowercase =F"{var_name} must be a list" raise ValueError(__snake_case ) else: for x in _object: if not isinstance(__snake_case , __snake_case ): _lowercase =F"{var_name} must be a list of strings" raise ValueError(__snake_case ) def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , ) -> None: """simple docstring""" _validate_dict(__snake_case , '''initial_probabilities''' , __snake_case ) _validate_nested_dict(__snake_case , '''transition_probabilities''' ) _validate_nested_dict(__snake_case , '''emission_probabilities''' ) def UpperCAmelCase_ ( __snake_case , __snake_case ) -> None: """simple docstring""" _validate_dict(_object , __snake_case , __snake_case ) for x in _object.values(): _validate_dict(__snake_case , __snake_case , __snake_case , __snake_case ) def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case = False ) -> None: """simple docstring""" if not isinstance(_object , __snake_case ): _lowercase =F"{var_name} must be a dict" raise ValueError(__snake_case ) if not all(isinstance(__snake_case , __snake_case ) for x in _object ): _lowercase =F"{var_name} all keys must be strings" raise 
ValueError(__snake_case ) if not all(isinstance(__snake_case , __snake_case ) for x in _object.values() ): _lowercase ='''nested dictionary ''' if nested else '''''' _lowercase =F"{var_name} {nested_text}all values must be {value_type.__name__}" raise ValueError(__snake_case ) if __name__ == "__main__": from doctest import testmod testmod()
5
1
import warnings from ...utils import logging from .image_processing_deit import DeiTImageProcessor lowerCAmelCase = logging.get_logger(__name__) class A ( snake_case_ ): def __init__(self , *lowerCAmelCase , **lowerCAmelCase ): warnings.warn( 'The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use DeiTImageProcessor instead.' , lowerCAmelCase , ) super().__init__(*lowerCAmelCase , **lowerCAmelCase )
356
from __future__ import annotations

import json

import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent

# Default request headers with a randomized User-Agent string.
lowerCAmelCase = {'''UserAgent''': UserAgent().random}


def _lowerCamelCase( lowercase__ ) -> dict:
    '''Extract the "user" dict embedded in an Instagram ProfilePage <script> tag.

    NOTE(review): the body references `script`/`data`/`info` while the signature
    shows an obfuscated placeholder name -- artifact of source obfuscation;
    kept verbatim.
    '''
    __lowercase= script.contents[0]
    # The embedded JSON blob starts at '{"config"' and ends one char before EOF.
    __lowercase= json.loads(data[data.find('{"config"' ) : -1] )
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class A:
    # Scrapes public profile metadata for one Instagram username.
    def __init__(self , lowerCAmelCase ):
        __lowercase= f'https://www.instagram.com/{username}/'
        __lowercase= self.get_json()

    def _A (self ):
        # Fetch the profile page and parse the embedded profile JSON.
        __lowercase= requests.get(self.url , headers=lowerCAmelCase ).text
        __lowercase= BeautifulSoup(lowerCAmelCase , 'html.parser' ).find_all('script' )
        try:
            return extract_user_profile(scripts[4] )
        except (json.decoder.JSONDecodeError, KeyError):
            # Page layout varies; fall back to the previous <script> tag.
            return extract_user_profile(scripts[3] )

    def __repr__(self ):
        return f'{self.__class__.__name__}(\'{self.username}\')'

    def __str__(self ):
        return f'{self.fullname} ({self.username}) is {self.biography}'

    @property
    def _A (self ):
        # Account handle.
        return self.user_data["username"]

    @property
    def _A (self ):
        # Display name.
        return self.user_data["full_name"]

    @property
    def _A (self ):
        return self.user_data["biography"]

    @property
    def _A (self ):
        return self.user_data["business_email"]

    @property
    def _A (self ):
        return self.user_data["external_url"]

    @property
    def _A (self ):
        # Follower count.
        return self.user_data["edge_followed_by"]["count"]

    @property
    def _A (self ):
        # Following count.
        return self.user_data["edge_follow"]["count"]

    @property
    def _A (self ):
        # Number of posts.
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def _A (self ):
        return self.user_data["profile_pic_url_hd"]

    @property
    def _A (self ):
        return self.user_data["is_verified"]

    @property
    def _A (self ):
        return self.user_data["is_private"]


def _lowerCamelCase( lowercase__ = "github" ) -> None:
    '''Smoke-test the scraper against the @github account (skipped on CI).'''
    import os

    if os.environ.get('CI' ):
        return  # test failing on GitHub Actions
    __lowercase= InstagramUser(lowercase__ )
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data , lowercase__ )
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 1_5_0
    assert instagram_user.number_of_followers > 1_2_0_0_0_0
    assert instagram_user.number_of_followings > 1_5
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith('https://instagram.' )
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    lowerCAmelCase = InstagramUser('''github''')
    print(instagram_user)
    print(F'{instagram_user.number_of_posts = }')
    print(F'{instagram_user.number_of_followers = }')
    print(F'{instagram_user.number_of_followings = }')
    print(F'{instagram_user.email = }')
    print(F'{instagram_user.website = }')
    print(F'{instagram_user.profile_picture_url = }')
    print(F'{instagram_user.is_verified = }')
    print(F'{instagram_user.is_private = }')
304
0
'''SpeechT5 tokenizer wrapping a SentencePiece character model.'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


_lowerCAmelCase = logging.get_logger(__name__)

_lowerCAmelCase = {'''vocab_file''': '''spm_char.model'''}

_lowerCAmelCase = {
    '''vocab_file''': {
        '''microsoft/speecht5_asr''': '''https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model''',
        '''microsoft/speecht5_tts''': '''https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model''',
        '''microsoft/speecht5_vc''': '''https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model''',
    }
}

_lowerCAmelCase = {
    '''microsoft/speecht5_asr''': 1024,
    '''microsoft/speecht5_tts''': 1024,
    '''microsoft/speecht5_vc''': 1024,
}


class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
    '''Character-level SentencePiece tokenizer for SpeechT5.

    NOTE(review): identifier obfuscation collapsed all four class attributes to
    `__lowercase` and most methods to `UpperCAmelCase_`; bodies still use the
    original names -- kept byte-identical.
    '''
    __lowercase : Dict = VOCAB_FILES_NAMES
    __lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
    __lowercase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __lowercase : str = ['''input_ids''', '''attention_mask''']

    def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase="<s>" ,__UpperCAmelCase="</s>" ,__UpperCAmelCase="<unk>" ,__UpperCAmelCase="<pad>" ,__UpperCAmelCase = None ,**__UpperCAmelCase ,) -> None:
        """Load the SentencePiece model from ``vocab_file``."""
        # Default the SentencePiece kwargs to an empty dict when not provided.
        lowerCAmelCase__ : Any = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=__UpperCAmelCase ,eos_token=__UpperCAmelCase ,unk_token=__UpperCAmelCase ,pad_token=__UpperCAmelCase ,sp_model_kwargs=self.sp_model_kwargs ,**__UpperCAmelCase ,)

        lowerCAmelCase__ : Dict = vocab_file
        lowerCAmelCase__ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(__UpperCAmelCase )

    @property
    def UpperCAmelCase_ ( self ) -> int:
        # Vocabulary size == number of SentencePiece pieces.
        return self.sp_model.get_piece_size()

    def UpperCAmelCase_ ( self ) -> Dict[str, int]:
        # Token-string -> id map for the full vocab, plus any added tokens.
        lowerCAmelCase__ : List[str] = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ) -> List[str]:
        # The SentencePieceProcessor is not picklable: drop it from the state
        # and reload it in __setstate__.
        lowerCAmelCase__ : Any = self.__dict__.copy()
        lowerCAmelCase__ : Dict = None
        return state

    def __setstate__( self ,__UpperCAmelCase ) -> List[str]:
        lowerCAmelCase__ : List[str] = d

        # for backward compatibility
        if not hasattr(self ,"""sp_model_kwargs""" ):
            lowerCAmelCase__ : int = {}

        lowerCAmelCase__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> List[str]:
        # Tokenize text into SentencePiece string pieces.
        return self.sp_model.encode(__UpperCAmelCase ,out_type=__UpperCAmelCase )

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> int:
        # token (str) -> id (int)
        return self.sp_model.piece_to_id(__UpperCAmelCase )

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str:
        # id (int) -> token (str)
        lowerCAmelCase__ : Tuple = self.sp_model.IdToPiece(__UpperCAmelCase )
        return token

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str:
        """Join a list of tokens back into a single decoded string."""
        lowerCAmelCase__ : Union[str, Any] = []
        lowerCAmelCase__ : Optional[Any] = """"""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(__UpperCAmelCase ) + token
                lowerCAmelCase__ : Dict = []
            else:
                current_sub_tokens.append(__UpperCAmelCase )
        out_string += self.sp_model.decode(__UpperCAmelCase )
        return out_string.strip()

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase=None ) -> List[int]:
        """Append EOS to a sequence (or concatenated pair of sequences)."""
        if token_ids_a is None:
            return token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_a + token_ids_a + [self.eos_token_id]

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ,__UpperCAmelCase = False ) -> List[int]:
        """Return a 0/1 mask marking special tokens (only the trailing EOS here)."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__UpperCAmelCase ,token_ids_a=__UpperCAmelCase ,already_has_special_tokens=__UpperCAmelCase )

        # Single trailing EOS contributed by build_inputs_with_special_tokens.
        lowerCAmelCase__ : str = [1]
        if token_ids_a is None:
            return ([0] * len(__UpperCAmelCase )) + suffix_ones
        return ([0] * len(__UpperCAmelCase )) + ([0] * len(__UpperCAmelCase )) + suffix_ones

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> Tuple[str]:
        """Copy (or serialize) the SentencePiece model into ``save_directory``."""
        if not os.path.isdir(__UpperCAmelCase ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        lowerCAmelCase__ : Dict = os.path.join(
            __UpperCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file ,__UpperCAmelCase )
        elif not os.path.isfile(self.vocab_file ):
            # Vocab file was loaded from memory -- write the serialized proto.
            with open(__UpperCAmelCase ,"""wb""" ) as fi:
                lowerCAmelCase__ : str = self.sp_model.serialized_model_proto()
                fi.write(__UpperCAmelCase )

        return (out_vocab_file,)
37
'''simple docstring''' # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import sys import transformers _lowerCAmelCase = '''3''' print('''Python version:''', sys.version) print('''transformers version:''', transformers.__version__) try: import torch print('''Torch version:''', torch.__version__) print('''Cuda available:''', torch.cuda.is_available()) print('''Cuda version:''', torch.version.cuda) print('''CuDNN version:''', torch.backends.cudnn.version()) print('''Number of GPUs available:''', torch.cuda.device_count()) print('''NCCL version:''', torch.cuda.nccl.version()) except ImportError: print('''Torch version:''', None) try: import deepspeed print('''DeepSpeed version:''', deepspeed.__version__) except ImportError: print('''DeepSpeed version:''', None) try: import tensorflow as tf print('''TensorFlow version:''', tf.__version__) print('''TF GPUs available:''', bool(tf.config.list_physical_devices('''GPU'''))) print('''Number of TF GPUs available:''', len(tf.config.list_physical_devices('''GPU'''))) except ImportError: print('''TensorFlow version:''', None)
37
1
'''Hubble parameter H(z) from the first Friedmann equation.'''


def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """Return the Hubble parameter H(z) in the same units as ``hubble_constant``.

    H(z) = H0 * sqrt(Or*(1+z)^4 + Om*(1+z)^3 + Ok*(1+z)^2 + OL), where the
    curvature density Ok closes the density sum to exactly one.

    Args:
        hubble_constant: present-day Hubble constant H0.
        radiation_density: relative radiation density Or (0..1).
        matter_density: relative matter density Om (0..1).
        dark_energy: relative dark-energy density OL (0..1).
        redshift: redshift z (>= 0).

    Raises:
        ValueError: if any input is negative, or any relative density
            exceeds one.

    Fixes vs. original: removed the redundant ``else`` after ``raise`` (the
    computation is now the straight-line path); behavior is unchanged.
    """
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError('All input parameters must be positive')
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError('Relative densities cannot be greater than one')

    # Curvature density closes the sum of relative densities to one.
    curvature = 1 - (matter_density + radiation_density + dark_energy)
    e_2 = (
        radiation_density * (redshift + 1) ** 4
        + matter_density * (redshift + 1) ** 3
        + curvature * (redshift + 1) ** 2
        + dark_energy
    )
    return hubble_constant * e_2 ** (1 / 2)


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3
    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
352
'''Run a value-guided diffusion planner on the D4RL hopper-medium-v2 task.'''
import darl  # noqa
import gym
import tqdm

from diffusers.experimental import ValueGuidedRLPipeline


# Planner / sampling hyper-parameters (obfuscated assignment target kept).
_lowerCamelCase = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}


if __name__ == "__main__":
    _lowerCamelCase = "hopper-medium-v2"
    _lowerCamelCase = gym.make(env_name)

    _lowerCamelCase = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    _lowerCamelCase = env.reset()
    _lowerCamelCase = 0
    _lowerCamelCase = 0
    # Episode length.
    _lowerCamelCase = 1000
    _lowerCamelCase = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            _lowerCamelCase = pipeline(obs, planning_horizon=32)

            # execute action in environment
            _lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = env.step(denorm_actions)
            _lowerCamelCase = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'''
                f''' {total_score}'''
            )

            # save observations for rendering
            rollout.append(next_observation.copy())

            _lowerCamelCase = next_observation
    except KeyboardInterrupt:
        # Allow a clean manual stop; the summary below still prints.
        pass

    print(f'''Total reward: {total_reward}''')
249
0
"""simple docstring""" _lowercase = '''Tobias Carryer''' from time import time class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[Any] ,A_ : Optional[int] ,A_ : Any ,A_ : Union[str, Any] ,A_ : Dict=int(time() ) ) -> str: # noqa: B008 A = multiplier A = increment A = modulo A = seed def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: A = (self.multiplier * self.seed + self.increment) % self.modulo return self.seed if __name__ == "__main__": # Show the LCG in action. _lowercase = LinearCongruentialGenerator(1_66_45_25, 10_13_90_42_23, 2 << 31) while True: print(lcg.next_number())
74
'''Evaluate a speech-recognition checkpoint on a 🤗 dataset, reporting WER/CER.'''
import argparse
import re
from typing import Dict

import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline


def __UpperCamelCase ( _A , _A ):
    '''Compute WER/CER for ``result`` and write them (plus optional per-sample logs) to disk.'''
    lowerCAmelCase_ = args.log_outputs
    # e.g. "mozilla-foundation_common_voice_8_0_en_test"
    lowerCAmelCase_ = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )

    # load metric
    lowerCAmelCase_ = load_metric('''wer''' )
    lowerCAmelCase_ = load_metric('''cer''' )

    # compute metrics
    lowerCAmelCase_ = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
    lowerCAmelCase_ = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )

    # print & log results
    lowerCAmelCase_ = f"WER: {wer_result}\nCER: {cer_result}"
    print(_A )
    with open(f"{dataset_id}_eval_results.txt" , '''w''' ) as f:
        f.write(_A )

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        lowerCAmelCase_ = f"log_{dataset_id}_predictions.txt"
        lowerCAmelCase_ = f"log_{dataset_id}_targets.txt"

        with open(_A , '''w''' ) as p, open(_A , '''w''' ) as t:
            # mapping function to write output
            def write_to_file(_A , _A ):
                p.write(f"{i}" + '''\n''' )
                p.write(batch['''prediction'''] + '''\n''' )
                t.write(f"{i}" + '''\n''' )
                t.write(batch['''target'''] + '''\n''' )

            result.map(_A , with_indices=_A )


def __UpperCamelCase ( _A ):
    '''Lower-case ``text`` and strip punctuation/ignored tokens to match training normalization.'''
    lowerCAmelCase_ = '''[,?.!\-\;\:"“%‘”�—’…–]'''  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    lowerCAmelCase_ = re.sub(_A , '''''' , text.lower() )

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    lowerCAmelCase_ = ['''\n\n''', '''\n''', '''  ''', '''   ''']
    for t in token_sequences_to_ignore:
        lowerCAmelCase_ = ''' '''.join(text.split(_A ) )

    return text


def __UpperCamelCase ( _A ):
    '''Run the ASR pipeline over the dataset and log WER/CER.'''
    # load dataset
    lowerCAmelCase_ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=_A )

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    lowerCAmelCase_ = AutoFeatureExtractor.from_pretrained(args.model_id )
    lowerCAmelCase_ = feature_extractor.sampling_rate

    # resample audio
    lowerCAmelCase_ = dataset.cast_column('''audio''' , Audio(sampling_rate=_A ) )

    # load eval pipeline
    if args.device is None:
        lowerCAmelCase_ = 0 if torch.cuda.is_available() else -1
    lowerCAmelCase_ = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device )

    # map function to decode audio
    def map_to_pred(_A ):
        lowerCAmelCase_ = asr(
            batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )

        lowerCAmelCase_ = prediction['''text''']
        lowerCAmelCase_ = normalize_text(batch['''sentence'''] )
        return batch

    # run inference on all examples
    lowerCAmelCase_ = dataset.map(_A , remove_columns=dataset.column_names )

    # compute and log_results
    # do not change function below
    log_results(_A , _A )


if __name__ == "__main__":
    _A = argparse.ArgumentParser()

    parser.add_argument(
        '''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers'''
    )
    parser.add_argument(
        '''--dataset''',
        type=str,
        required=True,
        help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''',
    )
    parser.add_argument(
        '''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice'''
    )
    parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''')
    parser.add_argument(
        '''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.'''
    )
    parser.add_argument(
        '''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.'''
    )
    parser.add_argument(
        '''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.'''
    )
    parser.add_argument(
        '''--device''',
        type=int,
        default=None,
        help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''',
    )
    _A = parser.parse_args()

    main(args)
278
0
import json
import logging
import os
import sys
from pathlib import Path

import finetune_rag

from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
    TestCasePlus,
    execute_subprocess_async,
    require_ray,
    require_torch_gpu,
    require_torch_multi_gpu,
)


logging.basicConfig(level=logging.DEBUG)

__A : Optional[Any] = logging.getLogger()
__A : Any = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__):
    # End-to-end smoke tests for the RAG fine-tuning example script.

    def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> Any:
        """Write a tiny source/target dataset (train/val/test) under the given dir."""
        os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
        lowerCamelCase_ ={"""source""": """What is love ?""", """target""": """life"""}
        lowerCamelCase_ ={"""train""": 12, """val""": 2, """test""": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                lowerCamelCase_ ="""\n""".join([contents[field]] * n_lines[split] )
                with open(os.path.join(_SCREAMING_SNAKE_CASE , f'{split}.{field}' ) , """w""" ) as f:
                    f.write(_SCREAMING_SNAKE_CASE )

    def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = "pytorch" )-> List[str]:
        """Run finetune_rag.py in a subprocess and return the parsed metrics.json."""
        lowerCamelCase_ =self.get_auto_remove_tmp_dir()
        lowerCamelCase_ =os.path.join(_SCREAMING_SNAKE_CASE , """output""" )
        lowerCamelCase_ =os.path.join(_SCREAMING_SNAKE_CASE , """data""" )
        self._create_dummy_data(data_dir=_SCREAMING_SNAKE_CASE )
        # Command-line arguments for the training subprocess (kept verbatim).
        lowerCamelCase_ =f'\n        --data_dir {data_dir} \\n        --output_dir {output_dir} \\n        --model_name_or_path facebook/rag-sequence-base \\n        --model_type rag_sequence \\n        --do_train \\n        --do_predict \\n        --n_val -1 \\n        --val_check_interval 1.0 \\n        --train_batch_size 2 \\n        --eval_batch_size 1 \\n        --max_source_length 25 \\n        --max_target_length 25 \\n        --val_max_target_length 25 \\n        --test_max_target_length 25 \\n        --label_smoothing 0.1 \\n        --dropout 0.1 \\n        --attention_dropout 0.1 \\n        --weight_decay 0.001 \\n        --adam_epsilon 1e-08 \\n        --max_grad_norm 0.1 \\n        --lr_scheduler polynomial \\n        --learning_rate 3e-04 \\n        --num_train_epochs 1 \\n        --warmup_steps 4 \\n        --gradient_accumulation_steps 1 \\n        --distributed-port 8787 \\n        --use_dummy_dataset 1 \\n        --distributed_retriever {distributed_retriever} \\n        '.split()

        if gpus > 0:
            testargs.append(f'--gpus={gpus}' )
            if is_apex_available():
                testargs.append("""--fp16""" )
        else:
            testargs.append("""--gpus=0""" )
            testargs.append("""--distributed_backend=ddp_cpu""" )
            testargs.append("""--num_processes=2""" )

        lowerCamelCase_ =[sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
        execute_subprocess_async(_SCREAMING_SNAKE_CASE , env=self.get_env() )

        lowerCamelCase_ =os.path.join(_SCREAMING_SNAKE_CASE , """metrics.json""" )
        with open(_SCREAMING_SNAKE_CASE ) as f:
            lowerCamelCase_ =json.load(_SCREAMING_SNAKE_CASE )

        return result

    @require_torch_gpu
    def _snake_case ( self )-> Tuple:
        lowerCamelCase_ =self._run_finetune(gpus=1 )
        self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )

    @require_torch_multi_gpu
    def _snake_case ( self )-> List[str]:
        lowerCamelCase_ =self._run_finetune(gpus=2 )
        self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )

    @require_torch_gpu
    @require_ray
    def _snake_case ( self )-> Any:
        lowerCamelCase_ =self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
        self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )

    @require_torch_multi_gpu
    @require_ray
    def _snake_case ( self )-> Optional[Any]:
        # NOTE(review): gpus=1 here despite the multi-GPU marker -- looks like
        # an upstream copy/paste; left untouched.
        lowerCamelCase_ =self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
        self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
49
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch

from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch


@require_torch
@require_tf
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__):
    # Tests for FeaturesManager.determine_framework (PyTorch vs TensorFlow
    # selection). NOTE(review): obfuscation collapsed setUp/test names to
    # `_snake_case` and most locals/args to `_SCREAMING_SNAKE_CASE`; kept verbatim.

    def _snake_case ( self )-> Union[str, Any]:
        # Shared fixtures: tiny model id and the two framework tags.
        lowerCamelCase_ =SMALL_MODEL_IDENTIFIER
        lowerCamelCase_ ="""pt"""
        lowerCamelCase_ ="""tf"""

    def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> List[str]:
        # Save a PyTorch checkpoint of the small test model into the given dir.
        lowerCamelCase_ =AutoModel.from_pretrained(self.test_model )
        model_pt.save_pretrained(_SCREAMING_SNAKE_CASE )

    def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> Optional[Any]:
        # Save a TensorFlow checkpoint (converted from PT) into the given dir.
        lowerCamelCase_ =TFAutoModel.from_pretrained(self.test_model , from_pt=_SCREAMING_SNAKE_CASE )
        model_tf.save_pretrained(_SCREAMING_SNAKE_CASE )

    def _snake_case ( self )-> Optional[Any]:
        lowerCamelCase_ ="""mock_framework"""

        # Framework provided - return whatever the user provides
        lowerCamelCase_ =FeaturesManager.determine_framework(self.test_model , _SCREAMING_SNAKE_CASE )
        self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(_SCREAMING_SNAKE_CASE )
            lowerCamelCase_ =FeaturesManager.determine_framework(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(_SCREAMING_SNAKE_CASE )
            lowerCamelCase_ =FeaturesManager.determine_framework(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

    def _snake_case ( self )-> Tuple:
        # Without an explicit framework the local checkpoint type decides.
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(_SCREAMING_SNAKE_CASE )
            lowerCamelCase_ =FeaturesManager.determine_framework(_SCREAMING_SNAKE_CASE )
            self.assertEqual(_SCREAMING_SNAKE_CASE , self.framework_pt )

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(_SCREAMING_SNAKE_CASE )
            lowerCamelCase_ =FeaturesManager.determine_framework(_SCREAMING_SNAKE_CASE )
            self.assertEqual(_SCREAMING_SNAKE_CASE , self.framework_tf )

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(_SCREAMING_SNAKE_CASE ):
                lowerCamelCase_ =FeaturesManager.determine_framework(_SCREAMING_SNAKE_CASE )

    def _snake_case ( self )-> Optional[Any]:
        # TensorFlow not in environment -> use PyTorch
        lowerCamelCase_ =MagicMock(return_value=_SCREAMING_SNAKE_CASE )
        with patch("""transformers.onnx.features.is_tf_available""" , _SCREAMING_SNAKE_CASE ):
            lowerCamelCase_ =FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(_SCREAMING_SNAKE_CASE , self.framework_pt )

        # PyTorch not in environment -> use TensorFlow
        lowerCamelCase_ =MagicMock(return_value=_SCREAMING_SNAKE_CASE )
        with patch("""transformers.onnx.features.is_torch_available""" , _SCREAMING_SNAKE_CASE ):
            lowerCamelCase_ =FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(_SCREAMING_SNAKE_CASE , self.framework_tf )

        # Both in environment -> use PyTorch
        lowerCamelCase_ =MagicMock(return_value=_SCREAMING_SNAKE_CASE )
        lowerCamelCase_ =MagicMock(return_value=_SCREAMING_SNAKE_CASE )
        with patch("""transformers.onnx.features.is_tf_available""" , _SCREAMING_SNAKE_CASE ), patch(
            """transformers.onnx.features.is_torch_available""" , _SCREAMING_SNAKE_CASE ):
            lowerCamelCase_ =FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(_SCREAMING_SNAKE_CASE , self.framework_pt )

        # Both not in environment -> raise error
        lowerCamelCase_ =MagicMock(return_value=_SCREAMING_SNAKE_CASE )
        lowerCamelCase_ =MagicMock(return_value=_SCREAMING_SNAKE_CASE )
        with patch("""transformers.onnx.features.is_tf_available""" , _SCREAMING_SNAKE_CASE ), patch(
            """transformers.onnx.features.is_torch_available""" , _SCREAMING_SNAKE_CASE ):
            with self.assertRaises(_SCREAMING_SNAKE_CASE ):
                lowerCamelCase_ =FeaturesManager.determine_framework(self.test_model )
49
1
'''Keep the OrderedDict mappings in src/transformers/models/auto sorted by identifier.'''
import argparse
import os
import re


# Directory containing the auto-mapping modules to keep sorted.
_A = 'src/transformers/models/auto'


# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_A = re.compile(R'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
_A = re.compile(R'\s*\(\s*"(\S[^"]+)"')


def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : bool = False ):
    '''Sort every OrderedDict mapping inside one auto module file.

    Rewrites the file in place when ``overwrite`` is truthy; otherwise returns
    True when the file WOULD change (and implicitly None when already sorted).
    '''
    with open(SCREAMING_SNAKE_CASE__ , 'r' , encoding='utf-8' ) as f:
        __UpperCamelCase =f.read()

    __UpperCamelCase =content.split('\n' )
    __UpperCamelCase =[]
    __UpperCamelCase =0
    while line_idx < len(SCREAMING_SNAKE_CASE__ ):
        if _re_intro_mapping.search(lines[line_idx] ) is not None:
            # Entries sit at the mapping's indentation plus 8 spaces.
            __UpperCamelCase =len(re.search(r'^(\s*)\S' , lines[line_idx] ).groups()[0] ) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(' ' * indent + '(' ):
                new_lines.append(lines[line_idx] )
                line_idx += 1

            __UpperCamelCase =[]
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    __UpperCamelCase =line_idx
                    while not lines[line_idx].startswith(' ' * indent + ')' ):
                        line_idx += 1
                    blocks.append('\n'.join(lines[start_idx : line_idx + 1] ) )
                else:
                    blocks.append(lines[line_idx] )
                line_idx += 1

            # Sort blocks by their identifiers
            __UpperCamelCase =sorted(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : _re_identifier.search(SCREAMING_SNAKE_CASE__ ).groups()[0] )
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx] )
            line_idx += 1

    if overwrite:
        with open(SCREAMING_SNAKE_CASE__ , 'w' , encoding='utf-8' ) as f:
            f.write('\n'.join(SCREAMING_SNAKE_CASE__ ) )
    elif "\n".join(SCREAMING_SNAKE_CASE__ ) != content:
        return True


def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : bool = False ):
    '''Apply sort_auto_mapping to every .py file in the auto module directory,
    raising ValueError (with the offending filenames) in check-only mode.'''
    __UpperCamelCase =[os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for f in os.listdir(SCREAMING_SNAKE_CASE__ ) if f.endswith('.py' )]
    __UpperCamelCase =[sort_auto_mapping(SCREAMING_SNAKE_CASE__ , overwrite=SCREAMING_SNAKE_CASE__ ) for fname in fnames]

    if not overwrite and any(SCREAMING_SNAKE_CASE__ ):
        __UpperCamelCase =[f for f, d in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if d]
        raise ValueError(
            F'The following files have auto mappings that need sorting: {", ".join(SCREAMING_SNAKE_CASE__ )}. Run `make style` to fix'
            ' this.' )


if __name__ == "__main__":
    _A = argparse.ArgumentParser()
    parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    _A = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
62
'''Root-mean-square speed of a gas molecule from kinetic theory.'''

# Universal gas constant R in J / (mol * K).
UNIVERSAL_GAS_CONSTANT = 8.314_459_8


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Return the RMS speed v_rms = sqrt(3RT / M) in m/s.

    Args:
        temperature: absolute temperature in kelvin (>= 0).
        molar_mass: molar mass in kg/mol (> 0).

    Raises:
        ValueError: if ``temperature`` is negative or ``molar_mass`` is not
            positive.

    Fixes vs. original: raises the specific ``ValueError`` instead of a bare
    ``Exception`` (still caught by existing ``except Exception`` handlers),
    and drops the redundant ``else`` after the raises.
    """
    if temperature < 0:
        raise ValueError("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise ValueError("Molar mass cannot be less than or equal to 0 kg/mol")
    return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # example (units follow the original demo)
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(F'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
250
0
"""simple docstring""" from __future__ import annotations from random import choice def UpperCAmelCase ( UpperCamelCase__ ): """simple docstring""" return choice(UpperCamelCase__ ) def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ): """simple docstring""" A__ = random_pivot(UpperCamelCase__ ) # partition based on pivot # linear time A__ = [e for e in lst if e < pivot] A__ = [e for e in lst if e > pivot] # if we get lucky, pivot might be the element we want. # we can easily see this: # small (elements smaller than k) # + pivot (kth element) # + big (elements larger than k) if len(UpperCamelCase__ ) == k - 1: return pivot # pivot is in elements bigger than k elif len(UpperCamelCase__ ) < k - 1: return kth_number(UpperCamelCase__ , k - len(UpperCamelCase__ ) - 1 ) # pivot is in elements smaller than k else: return kth_number(UpperCamelCase__ , UpperCamelCase__ ) if __name__ == "__main__": import doctest doctest.testmod()
357
"""simple docstring""" import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs __lowerCamelCase = imread(R"digital_image_processing/image_data/lena_small.jpg") __lowerCamelCase = cvtColor(img, COLOR_BGR2GRAY) def UpperCAmelCase ( ): """simple docstring""" A__ = cn.convert_to_negative(UpperCamelCase__ ) # assert negative_img array for at least one True assert negative_img.any() def UpperCAmelCase ( ): """simple docstring""" with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img: # Work around assertion for response assert str(cc.change_contrast(UpperCamelCase__ , 110 ) ).startswith( '<PIL.Image.Image image mode=RGB size=100x100 at' ) def UpperCAmelCase ( ): """simple docstring""" A__ = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def UpperCAmelCase ( ): """simple docstring""" A__ = imread('digital_image_processing/image_data/lena_small.jpg' , 0 ) # assert ambiguous array for all == True assert canny_img.all() A__ = canny.canny(UpperCamelCase__ ) # assert canny array for at least one True assert canny_array.any() def UpperCAmelCase ( ): """simple docstring""" assert gg.gaussian_filter(UpperCamelCase__ , 5 , sigma=0.9 ).all() def UpperCAmelCase ( ): """simple docstring""" A__ 
= array([[0.2_5, 0.5, 0.2_5], [0.5, -3, 0.5], [0.2_5, 0.5, 0.2_5]] ) A__ = conv.img_convolve(UpperCamelCase__ , UpperCamelCase__ ).astype(UpperCamelCase__ ) assert res.any() def UpperCAmelCase ( ): """simple docstring""" assert med.median_filter(UpperCamelCase__ , 3 ).any() def UpperCAmelCase ( ): """simple docstring""" A__ , A__ = sob.sobel_filter(UpperCamelCase__ ) assert grad.any() and theta.any() def UpperCAmelCase ( ): """simple docstring""" A__ = sp.make_sepia(UpperCamelCase__ , 20 ) assert sepia.all() def UpperCAmelCase ( UpperCamelCase__ = "digital_image_processing/image_data/lena_small.jpg" ): """simple docstring""" A__ = bs.Burkes(imread(UpperCamelCase__ , 1 ) , 120 ) burkes.process() assert burkes.output_img.any() def UpperCAmelCase ( UpperCamelCase__ = "digital_image_processing/image_data/lena_small.jpg" , ): """simple docstring""" A__ = rs.NearestNeighbour(imread(UpperCamelCase__ , 1 ) , 400 , 200 ) nn.process() assert nn.output.any() def UpperCAmelCase ( ): """simple docstring""" A__ = 'digital_image_processing/image_data/lena.jpg' # Reading the image and converting it to grayscale. A__ = imread(UpperCamelCase__ , 0 ) # Test for get_neighbors_pixel function() return not None A__ = 0 A__ = 0 A__ = image[x_coordinate][y_coordinate] A__ = lbp.get_neighbors_pixel( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image A__ = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): A__ = lbp.local_binary_value(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) assert lbp_image.any()
154
0
"""simple docstring""" # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import doctest import sys import warnings from os.path import abspath, dirname, join import _pytest from transformers.testing_utils import HfDoctestModule, HfDocTestParser # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. lowercase__ : Optional[int] = abspath(join(dirname(__file__), '''src''')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='''ignore''', category=FutureWarning) def __lowercase ( _a ): config.addinivalue_line( '''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' ) config.addinivalue_line( '''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' ) config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' ) config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' ) config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' ) config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' ) def __lowercase ( _a ): from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(_a ) def __lowercase ( _a ): from transformers.testing_utils import pytest_terminal_summary_main snake_case_ : str = terminalreporter.config.getoption('''--make-reports''' ) if make_reports: pytest_terminal_summary_main(_a , id=_a ) def __lowercase ( _a , _a ): # If no tests are collected, pytest exists with code 5, which makes the CI 
fail. if exitstatus == 5: snake_case_ : List[Any] = 0 # Doctest custom flag to ignore output. lowercase__ : Union[str, Any] = doctest.register_optionflag('''IGNORE_RESULT''') lowercase__ : Tuple = doctest.OutputChecker class _UpperCAmelCase ( lowerCAmelCase__): def _snake_case ( self : str , lowercase_ : List[str] , lowercase_ : Union[str, Any] , lowercase_ : List[Any] ): if IGNORE_RESULT & optionflags: return True return OutputChecker.check_output(self , lowercase_ , lowercase_ , lowercase_ ) lowercase__ : Any = CustomOutputChecker lowercase__ : str = HfDoctestModule lowercase__ : Tuple = HfDocTestParser
264
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase__ : int = logging.get_logger(__name__) lowercase__ : List[Any] = { '''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''', # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox } class _UpperCAmelCase ( lowerCAmelCase__): _lowerCAmelCase : List[Any] = """gpt_neox""" def __init__( self : List[str] , lowercase_ : str=50432 , lowercase_ : List[Any]=6144 , lowercase_ : List[Any]=44 , lowercase_ : Union[str, Any]=64 , lowercase_ : List[str]=24576 , lowercase_ : List[Any]="gelu" , lowercase_ : str=0.25 , lowercase_ : Optional[int]=10000 , lowercase_ : Optional[int]=0.0 , lowercase_ : Optional[int]=0.0 , lowercase_ : int=0.1 , lowercase_ : Tuple=2048 , lowercase_ : Union[str, Any]=0.02 , lowercase_ : List[str]=1E-5 , lowercase_ : str=True , lowercase_ : str=0 , lowercase_ : Union[str, Any]=2 , lowercase_ : List[str]=False , lowercase_ : Optional[int]=True , lowercase_ : List[Any]=None , **lowercase_ : Optional[int] , ): super().__init__(bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ ) snake_case_ : List[str] = vocab_size snake_case_ : Optional[Any] = max_position_embeddings snake_case_ : str = hidden_size snake_case_ : Dict = num_hidden_layers snake_case_ : Dict = num_attention_heads snake_case_ : List[Any] = intermediate_size snake_case_ : List[Any] = hidden_act snake_case_ : str = rotary_pct snake_case_ : Dict = rotary_emb_base snake_case_ : Optional[int] = attention_dropout snake_case_ : Tuple = hidden_dropout snake_case_ : Tuple = classifier_dropout snake_case_ : List[str] = initializer_range snake_case_ : Union[str, Any] = layer_norm_eps snake_case_ : Any = use_cache snake_case_ : Optional[int] = tie_word_embeddings snake_case_ : Any = use_parallel_residual snake_case_ : Union[str, Any] = rope_scaling self._rope_scaling_validation() if self.hidden_size % 
self.num_attention_heads != 0: raise ValueError( '''The hidden size is not divisble by the number of attention heads! Make sure to update them!''' ) def _snake_case ( self : Optional[int] ): if self.rope_scaling is None: return if not isinstance(self.rope_scaling , lowercase_ ) or len(self.rope_scaling ) != 2: raise ValueError( '''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, ''' f"got {self.rope_scaling}" ) snake_case_ : Any = self.rope_scaling.get('''type''' , lowercase_ ) snake_case_ : Union[str, Any] = self.rope_scaling.get('''factor''' , lowercase_ ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" ) if rope_scaling_factor is None or not isinstance(lowercase_ , lowercase_ ) or rope_scaling_factor <= 1.0: raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" )
264
1
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices snake_case : Dict = logging.get_logger(__name__) snake_case : Union[str, Any] = { '''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''', } class snake_case_ (lowerCamelCase_ , lowerCamelCase_ ): UpperCAmelCase__ : Tuple = '''bit''' UpperCAmelCase__ : Tuple = ['''preactivation''', '''bottleneck'''] UpperCAmelCase__ : Any = ['''SAME''', '''VALID'''] def __init__( self :Optional[int] ,__snake_case :Optional[Any]=3 ,__snake_case :int=64 ,__snake_case :List[str]=[2_56, 5_12, 10_24, 20_48] ,__snake_case :str=[3, 4, 6, 3] ,__snake_case :Any="preactivation" ,__snake_case :Tuple="relu" ,__snake_case :str=None ,__snake_case :int=32 ,__snake_case :Optional[int]=0.0 ,__snake_case :Optional[Any]=False ,__snake_case :List[Any]=32 ,__snake_case :Tuple=1 ,__snake_case :List[str]=None ,__snake_case :List[str]=None ,**__snake_case :Optional[int] ,) -> Tuple: super().__init__(**__snake_case ) if layer_type not in self.layer_types: raise ValueError(F'layer_type={layer_type} is not one of {",".join(self.layer_types )}' ) if global_padding is not None: if global_padding.upper() in self.supported_padding: a__ = global_padding.upper() else: raise ValueError(F'Padding strategy {global_padding} not supported' ) a__ = num_channels a__ = embedding_size a__ = hidden_sizes a__ = depths a__ = layer_type a__ = hidden_act a__ = global_padding a__ = num_groups a__ = drop_path_rate a__ = embedding_dynamic_padding a__ = output_stride a__ = width_factor a__ = ['stem'] + [F'stage{idx}' for idx in range(1 ,len(__snake_case ) + 1 )] a__ , a__ = get_aligned_output_features_output_indices( out_features=__snake_case ,out_indices=__snake_case ,stage_names=self.stage_names )
358
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available snake_case : Optional[int] = { '''configuration_clipseg''': [ '''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CLIPSegConfig''', '''CLIPSegTextConfig''', '''CLIPSegVisionConfig''', ], '''processing_clipseg''': ['''CLIPSegProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case : List[str] = [ '''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CLIPSegModel''', '''CLIPSegPreTrainedModel''', '''CLIPSegTextModel''', '''CLIPSegVisionModel''', '''CLIPSegForImageSegmentation''', ] if TYPE_CHECKING: from .configuration_clipseg import ( CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPSegConfig, CLIPSegTextConfig, CLIPSegVisionConfig, ) from .processing_clipseg import CLIPSegProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clipseg import ( CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegPreTrainedModel, CLIPSegTextModel, CLIPSegVisionModel, ) else: import sys snake_case : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
109
0
import argparse from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline if __name__ == "__main__": _UpperCAmelCase : str = argparse.ArgumentParser() parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument( """--txt2img_unclip""", default="""kakaobrain/karlo-v1-alpha""", type=str, required=False, help="""The pretrained txt2img unclip.""", ) _UpperCAmelCase : Tuple = parser.parse_args() _UpperCAmelCase : Tuple = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip) _UpperCAmelCase : Optional[int] = CLIPImageProcessor() _UpperCAmelCase : Tuple = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""") _UpperCAmelCase : int = UnCLIPImageVariationPipeline( decoder=txtaimg.decoder, text_encoder=txtaimg.text_encoder, tokenizer=txtaimg.tokenizer, text_proj=txtaimg.text_proj, feature_extractor=feature_extractor, image_encoder=image_encoder, super_res_first=txtaimg.super_res_first, super_res_last=txtaimg.super_res_last, decoder_scheduler=txtaimg.decoder_scheduler, super_res_scheduler=txtaimg.super_res_scheduler, ) imgaimg.save_pretrained(args.dump_path)
285
import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging _UpperCAmelCase : Tuple = logging.get_logger(__name__) class lowercase ( lowercase_ ): __SCREAMING_SNAKE_CASE : Optional[Any] = ['''input_features''', '''is_longer'''] def __init__( self , snake_case=64 , snake_case=4_8000 , snake_case=480 , snake_case=10 , snake_case=1024 , snake_case=0.0 , snake_case=False , snake_case = 0 , snake_case = 1_4000 , snake_case = None , snake_case = "fusion" , snake_case = "repeatpad" , **snake_case , ): super().__init__( feature_size=snake_case , sampling_rate=snake_case , padding_value=snake_case , return_attention_mask=snake_case , **snake_case , ) snake_case_ = top_db snake_case_ = truncation snake_case_ = padding snake_case_ = fft_window_size snake_case_ = (fft_window_size >> 1) + 1 snake_case_ = hop_length snake_case_ = max_length_s snake_case_ = max_length_s * sampling_rate snake_case_ = sampling_rate snake_case_ = frequency_min snake_case_ = frequency_max snake_case_ = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=snake_case , min_frequency=snake_case , max_frequency=snake_case , sampling_rate=snake_case , norm=snake_case , mel_scale='htk' , ) snake_case_ = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=snake_case , min_frequency=snake_case , max_frequency=snake_case , sampling_rate=snake_case , norm='slaney' , mel_scale='slaney' , ) def a ( self ): snake_case_ = copy.deepcopy(self.__dict__ ) snake_case_ = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def a ( self , snake_case , snake_case = None ): snake_case_ = 
spectrogram( snake_case , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=snake_case , log_mel='dB' , ) return log_mel_spectrogram.T def a ( self , snake_case , snake_case , snake_case ): snake_case_ = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk snake_case_ = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk snake_case_ = [0] # randomly choose index for each part snake_case_ = np.random.choice(ranges[0] ) snake_case_ = np.random.choice(ranges[1] ) snake_case_ = np.random.choice(ranges[2] ) snake_case_ = mel[idx_front : idx_front + chunk_frames, :] snake_case_ = mel[idx_middle : idx_middle + chunk_frames, :] snake_case_ = mel[idx_back : idx_back + chunk_frames, :] snake_case_ = torch.tensor(mel[None, None, :] ) snake_case_ = torch.nn.functional.interpolate( snake_case , size=[chunk_frames, 64] , mode='bilinear' , align_corners=snake_case ) snake_case_ = mel_shrink[0][0].numpy() snake_case_ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 ) return mel_fusion def a ( self , snake_case , snake_case , snake_case , snake_case ): if waveform.shape[0] > max_length: if truncation == "rand_trunc": snake_case_ = True # random crop to max_length (for compatibility) -> this should be handled by self.pad snake_case_ = len(snake_case ) - max_length snake_case_ = np.random.randint(0 , overflow + 1 ) snake_case_ = waveform[idx : idx + max_length] snake_case_ = self._np_extract_fbank_features(snake_case , self.mel_filters_slaney )[None, :] elif truncation == "fusion": snake_case_ = self._np_extract_fbank_features(snake_case , self.mel_filters ) snake_case_ = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed snake_case_ = mel.shape[0] if chunk_frames == total_frames: # there is a 
corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. snake_case_ = np.stack([mel, mel, mel, mel] , axis=0 ) snake_case_ = False else: snake_case_ = self._random_mel_fusion(snake_case , snake_case , snake_case ) snake_case_ = True else: raise NotImplementedError(F'''data_truncating {truncation} not implemented''' ) else: snake_case_ = False # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": snake_case_ = int(max_length / len(snake_case ) ) snake_case_ = np.stack(np.tile(snake_case , n_repeat + 1 ) )[:max_length] if padding == "repeatpad": snake_case_ = int(max_length / len(snake_case ) ) snake_case_ = np.stack(np.tile(snake_case , snake_case ) ) snake_case_ = np.pad(snake_case , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 ) if truncation == "fusion": snake_case_ = self._np_extract_fbank_features(snake_case , self.mel_filters ) snake_case_ = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 ) else: snake_case_ = self._np_extract_fbank_features(snake_case , self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self , snake_case , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , **snake_case , ): snake_case_ = truncation if truncation is not None else self.truncation snake_case_ = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a''' F''' sampling rate of {self.sampling_rate}. 
Please make sure that the provided `raw_speech` input''' F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) snake_case_ = isinstance(snake_case , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) snake_case_ = is_batched_numpy or ( isinstance(snake_case , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: snake_case_ = [np.asarray(snake_case , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(snake_case , np.ndarray ): snake_case_ = np.asarray(snake_case , dtype=np.floataa ) elif isinstance(snake_case , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): snake_case_ = raw_speech.astype(np.floataa ) # always return batch if not is_batched: snake_case_ = [np.asarray(snake_case )] # convert to mel spectrogram, truncate and pad if needed. 
snake_case_ = [ self._get_input_mel(snake_case , max_length if max_length else self.nb_max_samples , snake_case , snake_case ) for waveform in raw_speech ] snake_case_ = [] snake_case_ = [] for mel, longer in padded_inputs: input_mel.append(snake_case ) is_longer.append(snake_case ) if truncation == "fusion" and sum(snake_case ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer snake_case_ = np.random.randint(0 , len(snake_case ) ) snake_case_ = True if isinstance(input_mel[0] , snake_case ): snake_case_ = [np.asarray(snake_case , dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool snake_case_ = [[longer] for longer in is_longer] snake_case_ = {'input_features': input_mel, 'is_longer': is_longer} snake_case_ = BatchFeature(snake_case ) if return_tensors is not None: snake_case_ = input_features.convert_to_tensors(snake_case ) return input_features
285
1
import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipaConfig, BlipaForConditionalGeneration, BlipaProcessor, BlipaVisionConfig, BlipImageProcessor, OPTConfig, TaConfig, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def UpperCAmelCase__ (): """simple docstring""" snake_case = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png''' snake_case = Image.open(requests.get(UpperCamelCase_ ,stream=UpperCamelCase_ ).raw ).convert('''RGB''' ) return image def UpperCAmelCase__ (UpperCamelCase_ ): """simple docstring""" snake_case = [] # fmt: off # vision encoder rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') ) rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') ) rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') ) rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') ) rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') ) rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') ) 
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') ) # QFormer rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') ) rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') ) # fmt: on return rename_keys def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ): """simple docstring""" snake_case = dct.pop(UpperCamelCase_ ) snake_case = val def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ): """simple docstring""" for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases snake_case = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' ) snake_case = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' ) # next, set bias in the state dict snake_case = torch.cat((q_bias, torch.zeros_like(UpperCamelCase_ ,requires_grad=UpperCamelCase_ ), v_bias) ) snake_case = qkv_bias def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ): """simple docstring""" 
snake_case = 3_64 if '''coco''' in model_name else 2_24 snake_case = BlipaVisionConfig(image_size=UpperCamelCase_ ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "opt-2.7b" in model_name: snake_case = OPTConfig.from_pretrained('''facebook/opt-2.7b''' ,eos_token_id=UpperCamelCase_ ).to_dict() elif "opt-6.7b" in model_name: snake_case = OPTConfig.from_pretrained('''facebook/opt-6.7b''' ,eos_token_id=UpperCamelCase_ ).to_dict() elif "t5-xl" in model_name: snake_case = TaConfig.from_pretrained('''google/flan-t5-xl''' ,dense_act_fn='''gelu''' ,bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: snake_case = TaConfig.from_pretrained('''google/flan-t5-xxl''' ,dense_act_fn='''gelu''' ,bos_token_id=1 ).to_dict() snake_case = BlipaConfig(vision_config=UpperCamelCase_ ,text_config=UpperCamelCase_ ) return config, image_size @torch.no_grad() def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_=None ,UpperCamelCase_=False ): """simple docstring""" snake_case = ( AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' ) if '''opt''' in model_name else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' ) ) snake_case = tokenizer('''\n''' ,add_special_tokens=UpperCamelCase_ ).input_ids[0] snake_case , snake_case = get_blipa_config(UpperCamelCase_ ,eos_token_id=UpperCamelCase_ ) snake_case = BlipaForConditionalGeneration(UpperCamelCase_ ).eval() snake_case = { '''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''), '''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''), '''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''), '''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''), '''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''), '''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''), '''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''), } 
snake_case , snake_case = model_name_to_original[model_name] # load original model print('''Loading original model...''' ) snake_case = '''cuda''' if torch.cuda.is_available() else '''cpu''' snake_case , snake_case , snake_case = load_model_and_preprocess( name=UpperCamelCase_ ,model_type=UpperCamelCase_ ,is_eval=UpperCamelCase_ ,device=UpperCamelCase_ ) original_model.eval() print('''Done!''' ) # update state dict keys snake_case = original_model.state_dict() snake_case = create_rename_keys(UpperCamelCase_ ) for src, dest in rename_keys: rename_key(UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): snake_case = state_dict.pop(UpperCamelCase_ ) if key.startswith('''Qformer.bert''' ): snake_case = key.replace('''Qformer.bert''' ,'''qformer''' ) if "attention.self" in key: snake_case = key.replace('''self''' ,'''attention''' ) if "opt_proj" in key: snake_case = key.replace('''opt_proj''' ,'''language_projection''' ) if "t5_proj" in key: snake_case = key.replace('''t5_proj''' ,'''language_projection''' ) if key.startswith('''opt''' ): snake_case = key.replace('''opt''' ,'''language''' ) if key.startswith('''t5''' ): snake_case = key.replace('''t5''' ,'''language''' ) snake_case = val # read in qv biases read_in_q_v_bias(UpperCamelCase_ ,UpperCamelCase_ ) snake_case , snake_case = hf_model.load_state_dict(UpperCamelCase_ ,strict=UpperCamelCase_ ) assert len(UpperCamelCase_ ) == 0 assert unexpected_keys == ["qformer.embeddings.position_ids"] snake_case = load_demo_image() snake_case = vis_processors['''eval'''](UpperCamelCase_ ).unsqueeze(0 ).to(UpperCamelCase_ ) snake_case = tokenizer(['''\n'''] ,return_tensors='''pt''' ).input_ids.to(UpperCamelCase_ ) # create processor snake_case = BlipImageProcessor( size={'''height''': image_size, '''width''': image_size} ,image_mean=UpperCamelCase_ ,image_std=UpperCamelCase_ ) snake_case = BlipaProcessor(image_processor=UpperCamelCase_ 
,tokenizer=UpperCamelCase_ ) snake_case = processor(images=UpperCamelCase_ ,return_tensors='''pt''' ).pixel_values.to(UpperCamelCase_ ) # make sure processor creates exact same pixel values assert torch.allclose(UpperCamelCase_ ,UpperCamelCase_ ) original_model.to(UpperCamelCase_ ) hf_model.to(UpperCamelCase_ ) with torch.no_grad(): if "opt" in model_name: snake_case = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits snake_case = hf_model(UpperCamelCase_ ,UpperCamelCase_ ).logits else: snake_case = original_model( {'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits snake_case = input_ids.masked_fill(input_ids == tokenizer.pad_token_id ,-1_00 ) snake_case = hf_model(UpperCamelCase_ ,UpperCamelCase_ ,labels=UpperCamelCase_ ).logits assert original_logits.shape == logits.shape print('''First values of original logits:''' ,original_logits[0, :3, :3] ) print('''First values of HF logits:''' ,logits[0, :3, :3] ) # assert values if model_name == "blip2-flan-t5-xl": snake_case = torch.tensor( [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] ,device=UpperCamelCase_ ) assert torch.allclose(logits[0, :3, :3] ,UpperCamelCase_ ,atol=1e-4 ) elif model_name == "blip2-flan-t5-xl-coco": snake_case = torch.tensor( [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] ,device=UpperCamelCase_ ) else: # cast to same type snake_case = logits.dtype assert torch.allclose(original_logits.to(UpperCamelCase_ ) ,UpperCamelCase_ ,atol=1e-2 ) print('''Looks ok!''' ) print('''Generating a caption...''' ) snake_case = '''''' snake_case = tokenizer(UpperCamelCase_ ,return_tensors='''pt''' ).input_ids.to(UpperCamelCase_ ) snake_case = original_model.generate({'''image''': original_pixel_values} ) snake_case = hf_model.generate( UpperCamelCase_ ,UpperCamelCase_ ,do_sample=UpperCamelCase_ ,num_beams=5 ,max_length=30 ,min_length=1 ,top_p=0.9 ,repetition_penalty=1.0 ,length_penalty=1.0 
,temperature=1 ,) print('''Original generation:''' ,UpperCamelCase_ ) snake_case = input_ids.shape[1] snake_case = processor.batch_decode(outputs[:, prompt_length:] ,skip_special_tokens=UpperCamelCase_ ) snake_case = [text.strip() for text in output_text] print('''HF generation:''' ,UpperCamelCase_ ) if pytorch_dump_folder_path is not None: processor.save_pretrained(UpperCamelCase_ ) hf_model.save_pretrained(UpperCamelCase_ ) if push_to_hub: processor.push_to_hub(F'''nielsr/{model_name}''' ) hf_model.push_to_hub(F'''nielsr/{model_name}''' ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser() _SCREAMING_SNAKE_CASE : str = [ "blip2-opt-2.7b", "blip2-opt-6.7b", "blip2-opt-2.7b-coco", "blip2-opt-6.7b-coco", "blip2-flan-t5-xl", "blip2-flan-t5-xl-coco", "blip2-flan-t5-xxl", ] parser.add_argument( "--model_name", default="blip2-opt-2.7b", choices=choices, type=str, help="Path to hf config.json of model to convert", ) parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model and processor to the hub after converting", ) _SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
213
from __future__ import annotations

import time

# A path is a list of (row, col) coordinates.
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right


class Node:
    """A search-tree node: a grid position plus a back-pointer to its parent."""

    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    """Plain BFS over the module-level ``grid``.

    NOTE(review): faithful to the original — there is no visited set, so the
    frontier can revisit cells; fine for this small demo grid, but exponential
    on larger ones.
    """

    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        # start/goal are given as (y, x); Node takes (x, y).
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        """Return a shortest path from start to goal, or [start] if unreachable."""
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            for node in self.get_successors(current_node):
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Return the in-bounds, obstacle-free neighbours of *parent*."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Walk parent pointers back to the root and return the path start->node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    """Two simultaneous BFS frontiers, one from each end, meeting in the middle."""

    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            # Each frontier chases the other's most recent node.
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        """Join the two half-paths at the meeting node (dropping the duplicate)."""
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()  # the meeting node is already the tail of fwd_path
        bwd_path.reverse()
        return fwd_path + bwd_path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()

    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print("Bidirectional BFS computation time : ", bd_bfs_time)
213
1
import os
import unittest

from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
    VOCAB_FILES_NAMES,
    BasicTokenizer,
    BertTokenizer,
    WordpieceTokenizer,
    _is_control,
    _is_punctuation,
    _is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english


@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Unit tests for the slow (Python) and fast (Rust) BERT tokenizers."""

    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        # Minimal WordPiece vocabulary, written to disk so the slow tokenizer
        # can be constructed from a file like a real checkpoint.
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_rust_and_python_full_tokenizers(self):
        # Slow and fast tokenizers must produce identical tokens and ids.
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_chinese(self):
        tokenizer = BasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tH\u00E4LLo!how  \n Are yoU?  "), ["h\u00E4llo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tH\u00E4LLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        # Lower-casing strips accents by default.
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tH\u00E4LLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tH\u00E4LLo!how  \n Are yoU?  "), ["H\u00E4LLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tH\u00E4LLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_basic_tokenizer_splits_on_punctuation(self):
        tokenizer = BasicTokenizer()
        text = "a\n'll !!to?'d of, can't."
        expected = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
        self.assertListEqual(tokenizer.tokenize(text), expected)

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])

        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("bert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, na\u00EFve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##\u00EF"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["\u7684", "\u4EBA", "\u6709"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
169
from __future__ import annotations import math def UpperCamelCase__( UpperCamelCase__ : int )->bool: if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(UpperCamelCase__ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def UpperCamelCase__( UpperCamelCase__ : int )->list[int]: A__ = str(UpperCamelCase__ ) A__ = [n] for i in range(1 , len(UpperCamelCase__ ) ): list_nums.append(int(str_num[i:] ) ) list_nums.append(int(str_num[:-i] ) ) return list_nums def UpperCamelCase__( UpperCamelCase__ : int )->bool: if len(str(UpperCamelCase__ ) ) > 3: if not is_prime(int(str(UpperCamelCase__ )[-3:] ) ) or not is_prime(int(str(UpperCamelCase__ )[:3] ) ): return False return True def UpperCamelCase__( UpperCamelCase__ : int = 11 )->list[int]: A__ = [] A__ = 13 while len(UpperCamelCase__ ) != count: if validate(UpperCamelCase__ ): A__ = list_truncated_nums(UpperCamelCase__ ) if all(is_prime(UpperCamelCase__ ) for i in list_nums ): list_truncated_primes.append(UpperCamelCase__ ) num += 2 return list_truncated_primes def UpperCamelCase__( )->int: return sum(compute_truncated_primes(11 ) ) if __name__ == "__main__": print(F"{sum(compute_truncated_primes(11)) = }")
193
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Lazy-import structure: submodule name -> list of public names it exports.
_import_structure = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}

# The modeling file requires torch; only register it when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only; never executed at runtime.
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
352
import enum
import shutil
import sys

# Width of the attached terminal, sampled once at import time.
TERMINAL_WIDTH, _ = shutil.get_terminal_size()

# Final byte of the ANSI cursor-movement escape sequence (CSI n A/B/C/D).
CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    """Write *content* (plus *end*) to stdout and flush immediately."""
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    """Write *content* wrapped in the ANSI SGR color code *color*."""
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    """Return the cursor to the start of the current line."""
    forceWrite("\r")


def move_cursor(num_lines, direction):
    """Move the cursor *num_lines* lines in *direction* ("UP"/"DOWN"/"LEFT"/"RIGHT")."""
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    """Blank out the current line and return the cursor to its start."""
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    """Draw a full-width horizontal rule on the current line."""
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
225
0
from argparse import ArgumentParser

from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def try_infer_format_from_ext(path: str):
    """Guess the data format from a file extension; "pipe" means stdin/stdout."""
    if not path:
        return "pipe"

    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext

    raise Exception(
        f"Unable to determine file format from file extension {path}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"
    )


def run_command_factory(args):
    """Build the pipeline and data reader described by the parsed CLI *args*."""
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format,
        output_path=args.output,
        input_path=args.input,
        column=args.column if args.column else nlp.default_input_names,
        overwrite=args.overwrite,
    )
    return RunCommand(nlp, reader)


class RunCommand(BaseTransformersCLICommand):
    """``transformers-cli run``: stream a dataset through a pipeline and save results."""

    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register the ``run`` subcommand and all of its CLI options."""
        run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
        run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run")
        run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
        run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
        run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
        run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
        run_parser.add_argument(
            "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
        )
        run_parser.add_argument(
            "--column",
            type=str,
            help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)",
        )
        run_parser.add_argument(
            "--format",
            type=str,
            default="infer",
            choices=PipelineDataFormat.SUPPORTED_FORMATS,
            help="Input format to read from",
        )
        run_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
        run_parser.set_defaults(func=run_command_factory)

    def run(self):
        nlp, outputs = self._nlp, []

        for entry in self._reader:
            # Multi-column readers yield dicts of kwargs; single-column yield values.
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
65
from typing import Dict

from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
    TestCasePlus,
    execute_subprocess_async,
    get_torch_dist_unique_port,
    require_torch_multi_gpu,
    require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging


logger = logging.get_logger(__name__)


if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import Dataset

    from transformers import Trainer

    class DummyDataset(Dataset):
        """Dataset whose i-th element is simply the integer i."""

        def __init__(self, length: int = 101):
            self.length = length

        def __len__(self):
            return self.length

        def __getitem__(self, i):
            return i

    class DummyDataCollator:
        """Collator turning a batch of ints into input_ids/labels tensors."""

        def __call__(self, features):
            return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}

    class DummyModel(nn.Module):
        """Identity model: echoes its input ids back as predictions."""

        def __init__(self):
            super().__init__()
            # Add some (unused) params otherwise DDP will complain.
            self.fc = nn.Linear(120, 80)

        def forward(self, input_ids, labels=None):
            if labels is not None:
                return torch.tensor(0.0, device=input_ids.device), input_ids
            else:
                return input_ids


class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


if __name__ == "__main__":
    # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
    #
    # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py

    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        # Re-run the same checks with gradient/eval accumulation enabled.
        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
123
0
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional

import pyarrow as pa
import pyarrow.json as paj

import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline


logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for the JSON loader (JSON Lines files or one JSON document)."""

    # Optional schema; when set, tables are cast to it in `_cast_table`.
    features: Optional[datasets.Features] = None
    # Text encoding of the input files.
    encoding: str = "utf-8"
    # Error policy passed to `bytes.decode` / `open`; None means "strict".
    encoding_errors: Optional[str] = None
    # When the file is a single JSON object, name of the field holding the records.
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        """Validate deprecated config options and expose the configured features.

        Returns:
            `datasets.DatasetInfo` carrying `self.config.features` (may be None).
        """
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            # Keep backward compatibility: `block_size` feeds the new `chunksize` option.
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """Download/extract `self.config.data_files` and build one SplitGenerator per split.

        Accepts either a flat str/list/tuple (treated as a single TRAIN split) or a
        dict mapping split names to files.

        Raises:
            ValueError: if no data files were specified.
        """
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # `iter_files` lazily expands directories into their contained files.
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        """Cast an Arrow table to the configured features, adding missing columns as nulls."""
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                column_type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=column_type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        """Yield `(key, pa.Table)` pairs parsed from `files`.

        Two layouts are supported:
          * `self.config.field` set: each file is one JSON document; only that field
            (a list of dicts or a dict of lists) is converted.
          * otherwise: JSON Lines, parsed chunk by chunk with `pyarrow.json.read_json`,
            falling back to stdlib `json` when PyArrow cannot parse a chunk.
        """
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)
            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line so a record is never split between chunks.
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            # PyArrow gave up on this chunk: retry the whole file with the stdlib parser.
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
357
import io
import math
from typing import Dict, Optional, Union

import numpy as np
from huggingface_hub import hf_hub_download

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    get_image_size,
    infer_channel_dimension_format,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends


if is_vision_available():
    import textwrap

    from PIL import Image, ImageDraw, ImageFont

if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

logger = logging.get_logger(__name__)

# Hub repo that hosts the fallback font used by `render_text`.
DEFAULT_FONT_PATH = "ybelkada/fonts"


def _check_torch_version():
    """Raise ImportError if torch is installed but older than the required 1.11."""
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch."
        )


def torch_extract_patches(image_tensor, patch_height, patch_width):
    """Extract non-overlapping `patch_height x patch_width` patches from an image tensor.

    Args:
        image_tensor: channels-first image tensor (C, H, W); a batch dim is added internally.
        patch_height: height of each patch (also the vertical stride).
        patch_width: width of each patch (also the horizontal stride).

    Returns:
        Tensor of shape [1, H // patch_height, W // patch_width, C * patch_height * patch_width].
    """
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(
        image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width)
    )
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)


def render_text(
    text: str,
    text_size: int = 36,
    text_color: str = "black",
    background_color: str = "white",
    left_padding: int = 5,
    right_padding: int = 5,
    top_padding: int = 5,
    bottom_padding: int = 5,
    font_bytes: Optional[bytes] = None,
    font_path: Optional[str] = None,
) -> "Image.Image":
    """Render `text` as a PIL image, wrapped to 80 characters, with padding around it.

    The font comes from `font_bytes`, then `font_path`, then the `DEFAULT_FONT_PATH`
    Hub repo as a last resort.
    """
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image


def render_header(image: np.ndarray, header: str, **kwargs):
    """Render `header` text above `image` and return the stacked result as a numpy array.

    Both the header strip and the image are resized to a common width before pasting.
    Extra kwargs are forwarded to `render_text` (font_bytes, font_path, ...).
    """
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)
    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)
    return new_image


class Pix2StructImageProcessor(BaseImageProcessor):
    """Image processor that turns images into Pix2Struct flattened patch sequences."""

    model_input_names = ["flattened_patches"]

    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Dict[str, int] = None,
        max_patches: int = 2048,
        is_vqa: bool = False,
        **kwargs,
    ) -> None:
        """
        Args:
            do_convert_rgb: convert inputs to RGB before processing.
            do_normalize: apply per-image mean/std normalization.
            patch_size: dict with "height" and "width" keys; defaults to 16x16.
            max_patches: maximum number of patches per image (sequence length).
            is_vqa: if True, `preprocess` renders a question header onto each image.
        """
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa

    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs) -> np.ndarray:
        """Resize the image so it tiles into at most `max_patches` patches, then flatten.

        Each output row is [row_id, col_id, patch_pixels...]; rows are zero-padded up
        to `max_patches`, so id 0 marks padding.
        """
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t. the scaled grid still fits within `max_patches` patches
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        # NOTE(review): align_corners/antialias values were obscured in the source;
        # restored per the upstream implementation — confirm against it.
        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)
        return result

    def normalize(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        """Normalize by the image's own mean and an std floored at 1/sqrt(num_pixels)."""
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        # Floor the std so near-constant images do not blow up the division.
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        header_text: Optional[str] = None,
        do_convert_rgb: bool = None,
        do_normalize: Optional[bool] = None,
        max_patches: Optional[int] = None,
        patch_size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> ImageInput:
        """Preprocess one or more images into flattened patches plus an attention mask.

        For VQA models a `header_text` question is rendered on top of each image.
        Returns a `BatchFeature` with "flattened_patches" and "attention_mask".
        """
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")
            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)

            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy: a patch row of all zeros is padding
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )

        return encoded_outputs
223
0
import importlib
import shutil
import threading
import warnings
from typing import List

import fsspec
import fsspec.asyn

from . import compression
from .hffilesystem import HfFileSystem


# The find_spec target is "s3fs", so the optional filesystem is the S3 one.
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

# NOTE(review): the compression class names were garbled in the source
# ("BzaFileSystem", "LzaFileSystem"); restored to the bz2/lz4 variants that
# match the gzip/xz/zstd siblings — confirm against the `compression` module.
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the protocol prefix (e.g. "s3://") from `dataset_path`, if any."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs) -> bool:
    """Return True when `fs` is a non-local fsspec filesystem (protocol != "file")."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs, src: str, dst: str) -> None:
    """Move `src` to `dst` on filesystem `fs`, using a plain move for local paths."""
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        # NOTE(review): the `recursive` value was garbled in the source; True is the
        # only value that makes directory renames work — confirm upstream.
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """Reset fsspec's async loop/thread state, e.g. after forking worker processes."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        # Older fsspec: clear the module-level loop/thread references by hand.
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
209
import math import os import unittest from transformers import MegatronBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, ) class __A : '''simple docstring''' def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_3 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=9_9 , __lowerCAmelCase=6_4 , __lowerCAmelCase=3_2 , __lowerCAmelCase=5 , __lowerCAmelCase=4 , __lowerCAmelCase=3_7 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=1_6 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , ): '''simple docstring''' lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = seq_length lowerCamelCase__ = is_training lowerCamelCase__ = use_input_mask lowerCamelCase__ = use_token_type_ids lowerCamelCase__ = use_labels lowerCamelCase__ = vocab_size lowerCamelCase__ = hidden_size lowerCamelCase__ = embedding_size lowerCamelCase__ = num_hidden_layers lowerCamelCase__ = num_attention_heads lowerCamelCase__ = intermediate_size lowerCamelCase__ = hidden_act lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob 
lowerCamelCase__ = max_position_embeddings lowerCamelCase__ = type_vocab_size lowerCamelCase__ = type_sequence_label_size lowerCamelCase__ = initializer_range lowerCamelCase__ = num_labels lowerCamelCase__ = num_choices lowerCamelCase__ = scope def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase__ = None if self.use_input_mask: lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase__ = None if self.use_token_type_ids: lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase__ = None lowerCamelCase__ = None lowerCamelCase__ = None if self.use_labels: lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase__ = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase__ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCamelCase ( self ): '''simple docstring''' return MegatronBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = MegatronBertModel(config=__lowerCAmelCase ) 
model.to(__lowerCAmelCase ) model.eval() lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) lowerCamelCase__ = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) lowerCamelCase__ = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = MegatronBertForMaskedLM(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = MegatronBertForCausalLM(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = MegatronBertForNextSentencePrediction(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() lowerCamelCase__ = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = MegatronBertForPreTraining(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() lowerCamelCase__ = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , next_sentence_label=__lowerCAmelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = MegatronBertForQuestionAnswering(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() lowerCamelCase__ = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = self.num_labels lowerCamelCase__ = MegatronBertForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCamelCase ( self 
, __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = self.num_labels lowerCamelCase__ = MegatronBertForTokenClassification(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() lowerCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = self.num_choices lowerCamelCase__ = MegatronBertForMultipleChoice(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() lowerCamelCase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase__ = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) = config_and_inputs lowerCamelCase__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = ( ( MegatronBertModel, 
MegatronBertForMaskedLM, MegatronBertForCausalLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, ) if is_torch_available() else () ) lowerCAmelCase_ = ( { """feature-extraction""": MegatronBertModel, """fill-mask""": MegatronBertForMaskedLM, """question-answering""": MegatronBertForQuestionAnswering, """text-classification""": MegatronBertForSequenceClassification, """text-generation""": MegatronBertForCausalLM, """token-classification""": MegatronBertForTokenClassification, """zero-shot""": MegatronBertForSequenceClassification, } if is_torch_available() else {} ) lowerCAmelCase_ = True # test_resize_embeddings = False lowerCAmelCase_ = False def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False ): '''simple docstring''' lowerCamelCase__ = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) if return_labels: if model_class in get_values(__lowerCAmelCase ): lowerCamelCase__ = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCAmelCase ) lowerCamelCase__ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase ) return inputs_dict def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = MegatronBertModelTester(self ) lowerCamelCase__ = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 ) def __lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_model(*__lowerCAmelCase ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__lowerCAmelCase ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__lowerCAmelCase ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__lowerCAmelCase ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_pretraining(*__lowerCAmelCase ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_question_answering(*__lowerCAmelCase ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__lowerCAmelCase ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_token_classification(*__lowerCAmelCase ) def lowerCAmelCase__(__snake_case ) -> List[str]: '''simple docstring''' return torch.tensor( __snake_case ,dtype=torch.long ,device=__snake_case ,) _a = 1e-4 @require_torch @require_sentencepiece @require_tokenizers class __A ( unittest.TestCase ): '''simple docstring''' @slow @unittest.skip('''Model is not available.''' ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = '''nvidia/megatron-bert-uncased-345m''' if "MYDIR" in os.environ: lowerCamelCase__ = os.path.join(os.environ['''MYDIR'''] , __lowerCAmelCase ) lowerCamelCase__ = MegatronBertModel.from_pretrained(__lowerCAmelCase ) model.to(__lowerCAmelCase ) 
model.half() lowerCamelCase__ = _long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]] ) with torch.no_grad(): lowerCamelCase__ = model(__lowerCAmelCase )[0] lowerCamelCase__ = torch.Size((1, 9, 1_0_2_4) ) self.assertEqual(output.shape , __lowerCAmelCase ) lowerCamelCase__ = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728] for ii in range(3 ): for jj in range(3 ): lowerCamelCase__ = output[0, ii, jj] lowerCamelCase__ = expected[3 * ii + jj] lowerCamelCase__ = '''ii={} jj={} a={} b={}'''.format(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) self.assertTrue(math.isclose(__lowerCAmelCase , __lowerCAmelCase , rel_tol=__lowerCAmelCase , abs_tol=__lowerCAmelCase ) , msg=__lowerCAmelCase )
209
1
"""simple docstring""" import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def lowerCamelCase_( _lowerCamelCase ) -> Tuple: '''simple docstring''' _lowerCamelCase : Optional[int] = {} _lowerCamelCase : Optional[int] = tokenizer(example["content"] , truncation=_lowerCamelCase )["input_ids"] _lowerCamelCase : Dict = len(example["content"] ) / len(output["input_ids"] ) return output _lowerCAmelCase : Tuple = HfArgumentParser(PretokenizationArguments) _lowerCAmelCase : Optional[int] = parser.parse_args() if args.num_workers is None: _lowerCAmelCase : Any = multiprocessing.cpu_count() _lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(args.tokenizer_dir) _lowerCAmelCase : Union[str, Any] = time.time() _lowerCAmelCase : Optional[int] = load_dataset(args.dataset_name, split='''train''') print(f'''Dataset loaded in {time.time()-t_start:.2f}s''') _lowerCAmelCase : Any = time.time() _lowerCAmelCase : Dict = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ '''repo_name''', '''path''', '''copies''', '''size''', '''content''', '''license''', '''hash''', '''line_mean''', '''line_max''', '''alpha_frac''', '''autogenerated''', ], ) print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''') _lowerCAmelCase : str = time.time() ds.push_to_hub(args.tokenized_data_repo) print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
340
"""simple docstring""" _lowerCAmelCase : Tuple = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Tuple: '''simple docstring''' _lowerCamelCase : Any = [False] * len(_lowerCamelCase ) _lowerCamelCase : Union[str, Any] = [s] _lowerCamelCase : str = True while queue: _lowerCamelCase : Optional[int] = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(_lowerCamelCase ) _lowerCamelCase : Any = True _lowerCamelCase : Any = u return visited[t] def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]: '''simple docstring''' _lowerCamelCase : List[str] = [-1] * (len(_lowerCamelCase )) _lowerCamelCase : Union[str, Any] = 0 _lowerCamelCase : Union[str, Any] = [] _lowerCamelCase : List[str] = [i[:] for i in graph] # Record original cut, copy. while bfs(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): _lowerCamelCase : Any = float("Inf" ) _lowerCamelCase : Dict = sink while s != source: # Find the minimum value in select path _lowerCamelCase : Union[str, Any] = min(_lowerCamelCase , graph[parent[s]][s] ) _lowerCamelCase : Union[str, Any] = parent[s] max_flow += path_flow _lowerCamelCase : Optional[Any] = sink while v != source: _lowerCamelCase : Union[str, Any] = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow _lowerCamelCase : List[str] = parent[v] for i in range(len(_lowerCamelCase ) ): for j in range(len(graph[0] ) ): if graph[i][j] == 0 and temp[i][j] > 0: res.append((i, j) ) return res if __name__ == "__main__": print(mincut(test_graph, source=0, sink=5))
340
1
'''simple docstring''' from sklearn.metrics import fa_score, matthews_corrcoef import datasets from .record_evaluation import evaluate as evaluate_record _lowerCAmelCase = '''\ @article{wang2019superglue, title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems}, author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R}, journal={arXiv preprint arXiv:1905.00537}, year={2019} } ''' _lowerCAmelCase = '''\ SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after GLUE with a new set of more difficult language understanding tasks, improved resources, and a new public leaderboard. ''' _lowerCAmelCase = ''' Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset. Args: predictions: list of predictions to score. Depending on the SuperGlUE subset: - for \'record\': list of question-answer dictionaries with the following keys: - \'idx\': index of the question as specified by the dataset - \'prediction_text\': the predicted answer text - for \'multirc\': list of question-answer dictionaries with the following keys: - \'idx\': index of the question-answer pair as specified by the dataset - \'prediction\': the predicted answer label - otherwise: list of predicted labels references: list of reference labels. 
Depending on the SuperGLUE subset: - for \'record\': list of question-answers dictionaries with the following keys: - \'idx\': index of the question as specified by the dataset - \'answers\': list of possible answers - otherwise: list of reference labels Returns: depending on the SuperGLUE subset: - for \'record\': - \'exact_match\': Exact match between answer and gold answer - \'f1\': F1 score - for \'multirc\': - \'exact_match\': Exact match between answer and gold answer - \'f1_m\': Per-question macro-F1 score - \'f1_a\': Average F1 score over all answers - for \'axb\': \'matthews_correlation\': Matthew Correlation - for \'cb\': - \'accuracy\': Accuracy - \'f1\': F1 score - for all others: - \'accuracy\': Accuracy Examples: >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"] >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\') >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0, \'f1\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\') >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}] >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'exact_match\': 1.0, \'f1\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\') >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}] >>> 
references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\') >>> references = [0, 1] >>> predictions = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'matthews_correlation\': 1.0} ''' def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" return float((preds == labels).mean() ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase="binary" ): """simple docstring""" lowerCAmelCase__ : Any = simple_accuracy(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : Tuple = float(fa_score(y_true=UpperCamelCase , y_pred=UpperCamelCase , average=UpperCamelCase ) ) return { "accuracy": acc, "f1": fa, } def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : List[str] = {} for id_pred, label in zip(UpperCamelCase , UpperCamelCase ): lowerCAmelCase__ : str = f"""{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}""" lowerCAmelCase__ : Dict = id_pred["""prediction"""] if question_id in question_map: question_map[question_id].append((pred, label) ) else: lowerCAmelCase__ : Optional[int] = [(pred, label)] lowerCAmelCase__ , lowerCAmelCase__ : int = [], [] for question, preds_labels in question_map.items(): lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = zip(*UpperCamelCase ) lowerCAmelCase__ : List[Any] = fa_score(y_true=UpperCamelCase , y_pred=UpperCamelCase , average="""macro""" ) fas.append(UpperCamelCase ) lowerCAmelCase__ : Union[str, Any] = int(sum(pred == label for pred, label in preds_labels ) == len(UpperCamelCase ) ) ems.append(UpperCamelCase ) lowerCAmelCase__ : Optional[Any] = float(sum(UpperCamelCase ) / len(UpperCamelCase ) ) lowerCAmelCase__ : List[Any] = sum(UpperCamelCase ) / len(UpperCamelCase ) 
lowerCAmelCase__ : Dict = float(fa_score(y_true=UpperCamelCase , y_pred=[id_pred["""prediction"""] for id_pred in ids_preds] ) ) return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a} @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_( datasets.Metric ): '''simple docstring''' def UpperCAmelCase_ ( self ) -> Optional[Any]: if self.config_name not in [ "boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg", ]: raise KeyError( """You should supply a configuration name selected in """ """[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" ) return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(self._get_feature_types() ) ,codebase_urls=[] ,reference_urls=[] ,format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None ,) def UpperCAmelCase_ ( self ) -> str: if self.config_name == "record": return { "predictions": { "idx": { "passage": datasets.Value("""int64""" ), "query": datasets.Value("""int64""" ), }, "prediction_text": datasets.Value("""string""" ), }, "references": { "idx": { "passage": datasets.Value("""int64""" ), "query": datasets.Value("""int64""" ), }, "answers": datasets.Sequence(datasets.Value("""string""" ) ), }, } elif self.config_name == "multirc": return { "predictions": { "idx": { "answer": datasets.Value("""int64""" ), "paragraph": datasets.Value("""int64""" ), "question": datasets.Value("""int64""" ), }, "prediction": datasets.Value("""int64""" ), }, "references": datasets.Value("""int64""" ), } else: return { "predictions": datasets.Value("""int64""" ), "references": datasets.Value("""int64""" ), } def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> Any: if self.config_name == "axb": return {"matthews_correlation": 
matthews_corrcoef(__UpperCAmelCase ,__UpperCAmelCase )} elif self.config_name == "cb": return acc_and_fa(__UpperCAmelCase ,__UpperCAmelCase ,fa_avg="""macro""" ) elif self.config_name == "record": lowerCAmelCase__ : Optional[Any] = [ { """qas""": [ {"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]} for ref in references ] } ] lowerCAmelCase__ : Union[str, Any] = {pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions} return evaluate_record(__UpperCAmelCase ,__UpperCAmelCase )[0] elif self.config_name == "multirc": return evaluate_multirc(__UpperCAmelCase ,__UpperCAmelCase ) elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]: return {"accuracy": simple_accuracy(__UpperCAmelCase ,__UpperCAmelCase )} else: raise KeyError( """You should supply a configuration name selected in """ """[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
37
# Exported explicitly: the name starts with an underscore, which `from module
# import *` would otherwise hide.
__all__ = ["_SCREAMING_SNAKE_CASE"]


def _SCREAMING_SNAKE_CASE(list_data: list, length: int = 0) -> list:
    """Sort ``list_data`` in place with bubble sort and return it.

    Args:
        list_data: The list to sort (mutated in place).
        length: Length of the prefix still considered unsorted; ``0`` (the
            default) means "use the whole list".

    Returns:
        The same list object, sorted in ascending order.
    """
    # NOTE(review): the original signature was mangled to two parameters both
    # named ``a`` (a SyntaxError) while the body used ``list_data`` and
    # ``length``; the parameter names are restored from the body.
    length = length or len(list_data)
    # Iterative rewrite of the original tail recursion: same comparisons and
    # swaps, but no risk of RecursionError on long inputs.
    swapped = True
    while swapped and length > 1:
        swapped = False
        for i in range(length - 1):
            if list_data[i] > list_data[i + 1]:
                list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
                swapped = True
        # After each pass the largest remaining element sits at length - 1.
        length -= 1
    return list_data


if __name__ == "__main__":
    import doctest

    doctest.testmod()
280
0
'''simple docstring''' from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES _lowerCamelCase : List[str] = logging.get_logger(__name__) _lowerCamelCase : Any = OrderedDict( [ # Base model mapping ('albert', 'FlaxAlbertModel'), ('bart', 'FlaxBartModel'), ('beit', 'FlaxBeitModel'), ('bert', 'FlaxBertModel'), ('big_bird', 'FlaxBigBirdModel'), ('blenderbot', 'FlaxBlenderbotModel'), ('blenderbot-small', 'FlaxBlenderbotSmallModel'), ('clip', 'FlaxCLIPModel'), ('distilbert', 'FlaxDistilBertModel'), ('electra', 'FlaxElectraModel'), ('gpt-sw3', 'FlaxGPT2Model'), ('gpt2', 'FlaxGPT2Model'), ('gpt_neo', 'FlaxGPTNeoModel'), ('gptj', 'FlaxGPTJModel'), ('longt5', 'FlaxLongT5Model'), ('marian', 'FlaxMarianModel'), ('mbart', 'FlaxMBartModel'), ('mt5', 'FlaxMT5Model'), ('opt', 'FlaxOPTModel'), ('pegasus', 'FlaxPegasusModel'), ('regnet', 'FlaxRegNetModel'), ('resnet', 'FlaxResNetModel'), ('roberta', 'FlaxRobertaModel'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'), ('roformer', 'FlaxRoFormerModel'), ('t5', 'FlaxT5Model'), ('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'), ('vit', 'FlaxViTModel'), ('wav2vec2', 'FlaxWav2Vec2Model'), ('whisper', 'FlaxWhisperModel'), ('xglm', 'FlaxXGLMModel'), ('xlm-roberta', 'FlaxXLMRobertaModel'), ] ) _lowerCamelCase : Optional[Any] = OrderedDict( [ # Model for pre-training mapping ('albert', 'FlaxAlbertForPreTraining'), ('bart', 'FlaxBartForConditionalGeneration'), ('bert', 'FlaxBertForPreTraining'), ('big_bird', 'FlaxBigBirdForPreTraining'), ('electra', 'FlaxElectraForPreTraining'), ('longt5', 'FlaxLongT5ForConditionalGeneration'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('mt5', 'FlaxMT5ForConditionalGeneration'), ('roberta', 'FlaxRobertaForMaskedLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'), ('roformer', 'FlaxRoFormerForMaskedLM'), ('t5', 
'FlaxT5ForConditionalGeneration'), ('wav2vec2', 'FlaxWav2Vec2ForPreTraining'), ('whisper', 'FlaxWhisperForConditionalGeneration'), ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'), ] ) _lowerCamelCase : int = OrderedDict( [ # Model for Masked LM mapping ('albert', 'FlaxAlbertForMaskedLM'), ('bart', 'FlaxBartForConditionalGeneration'), ('bert', 'FlaxBertForMaskedLM'), ('big_bird', 'FlaxBigBirdForMaskedLM'), ('distilbert', 'FlaxDistilBertForMaskedLM'), ('electra', 'FlaxElectraForMaskedLM'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('roberta', 'FlaxRobertaForMaskedLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'), ('roformer', 'FlaxRoFormerForMaskedLM'), ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'), ] ) _lowerCamelCase : List[str] = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ('bart', 'FlaxBartForConditionalGeneration'), ('blenderbot', 'FlaxBlenderbotForConditionalGeneration'), ('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'), ('encoder-decoder', 'FlaxEncoderDecoderModel'), ('longt5', 'FlaxLongT5ForConditionalGeneration'), ('marian', 'FlaxMarianMTModel'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('mt5', 'FlaxMT5ForConditionalGeneration'), ('pegasus', 'FlaxPegasusForConditionalGeneration'), ('t5', 'FlaxT5ForConditionalGeneration'), ] ) _lowerCamelCase : int = OrderedDict( [ # Model for Image-classsification ('beit', 'FlaxBeitForImageClassification'), ('regnet', 'FlaxRegNetForImageClassification'), ('resnet', 'FlaxResNetForImageClassification'), ('vit', 'FlaxViTForImageClassification'), ] ) _lowerCamelCase : int = OrderedDict( [ ('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'), ] ) _lowerCamelCase : Optional[int] = OrderedDict( [ # Model for Causal LM mapping ('bart', 'FlaxBartForCausalLM'), ('bert', 'FlaxBertForCausalLM'), ('big_bird', 'FlaxBigBirdForCausalLM'), ('electra', 'FlaxElectraForCausalLM'), ('gpt-sw3', 'FlaxGPT2LMHeadModel'), ('gpt2', 'FlaxGPT2LMHeadModel'), ('gpt_neo', 
'FlaxGPTNeoForCausalLM'), ('gptj', 'FlaxGPTJForCausalLM'), ('opt', 'FlaxOPTForCausalLM'), ('roberta', 'FlaxRobertaForCausalLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'), ('xglm', 'FlaxXGLMForCausalLM'), ('xlm-roberta', 'FlaxXLMRobertaForCausalLM'), ] ) _lowerCamelCase : Optional[Any] = OrderedDict( [ # Model for Sequence Classification mapping ('albert', 'FlaxAlbertForSequenceClassification'), ('bart', 'FlaxBartForSequenceClassification'), ('bert', 'FlaxBertForSequenceClassification'), ('big_bird', 'FlaxBigBirdForSequenceClassification'), ('distilbert', 'FlaxDistilBertForSequenceClassification'), ('electra', 'FlaxElectraForSequenceClassification'), ('mbart', 'FlaxMBartForSequenceClassification'), ('roberta', 'FlaxRobertaForSequenceClassification'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'), ('roformer', 'FlaxRoFormerForSequenceClassification'), ('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'), ] ) _lowerCamelCase : Any = OrderedDict( [ # Model for Question Answering mapping ('albert', 'FlaxAlbertForQuestionAnswering'), ('bart', 'FlaxBartForQuestionAnswering'), ('bert', 'FlaxBertForQuestionAnswering'), ('big_bird', 'FlaxBigBirdForQuestionAnswering'), ('distilbert', 'FlaxDistilBertForQuestionAnswering'), ('electra', 'FlaxElectraForQuestionAnswering'), ('mbart', 'FlaxMBartForQuestionAnswering'), ('roberta', 'FlaxRobertaForQuestionAnswering'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'), ('roformer', 'FlaxRoFormerForQuestionAnswering'), ('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'), ] ) _lowerCamelCase : Union[str, Any] = OrderedDict( [ # Model for Token Classification mapping ('albert', 'FlaxAlbertForTokenClassification'), ('bert', 'FlaxBertForTokenClassification'), ('big_bird', 'FlaxBigBirdForTokenClassification'), ('distilbert', 'FlaxDistilBertForTokenClassification'), ('electra', 'FlaxElectraForTokenClassification'), ('roberta', 
'FlaxRobertaForTokenClassification'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'), ('roformer', 'FlaxRoFormerForTokenClassification'), ('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'), ] ) _lowerCamelCase : Dict = OrderedDict( [ # Model for Multiple Choice mapping ('albert', 'FlaxAlbertForMultipleChoice'), ('bert', 'FlaxBertForMultipleChoice'), ('big_bird', 'FlaxBigBirdForMultipleChoice'), ('distilbert', 'FlaxDistilBertForMultipleChoice'), ('electra', 'FlaxElectraForMultipleChoice'), ('roberta', 'FlaxRobertaForMultipleChoice'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'), ('roformer', 'FlaxRoFormerForMultipleChoice'), ('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'), ] ) _lowerCamelCase : int = OrderedDict( [ ('bert', 'FlaxBertForNextSentencePrediction'), ] ) _lowerCamelCase : Union[str, Any] = OrderedDict( [ ('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'), ('whisper', 'FlaxWhisperForConditionalGeneration'), ] ) _lowerCamelCase : Any = OrderedDict( [ ('whisper', 'FlaxWhisperForAudioClassification'), ] ) _lowerCamelCase : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) _lowerCamelCase : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) _lowerCamelCase : Tuple = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) _lowerCamelCase : Any = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) _lowerCamelCase : Union[str, Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) _lowerCamelCase : Optional[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) _lowerCamelCase : Optional[int] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) _lowerCamelCase : int = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) 
_lowerCamelCase : List[str] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) _lowerCamelCase : Tuple = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) _lowerCamelCase : List[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) _lowerCamelCase : str = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) _lowerCamelCase : Any = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) _lowerCamelCase : Optional[int] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class __UpperCAmelCase ( _BaseAutoModelClass ): '''simple docstring''' __lowerCAmelCase = FLAX_MODEL_MAPPING _lowerCamelCase : Optional[Any] = auto_class_update(FlaxAutoModel) class __UpperCAmelCase ( _BaseAutoModelClass ): '''simple docstring''' __lowerCAmelCase = FLAX_MODEL_FOR_PRETRAINING_MAPPING _lowerCamelCase : List[str] = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining') class __UpperCAmelCase ( _BaseAutoModelClass ): '''simple docstring''' __lowerCAmelCase = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING _lowerCamelCase : List[Any] = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling') class __UpperCAmelCase ( _BaseAutoModelClass ): '''simple docstring''' __lowerCAmelCase = FLAX_MODEL_FOR_MASKED_LM_MAPPING _lowerCamelCase : List[str] = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling') class __UpperCAmelCase ( _BaseAutoModelClass ): '''simple docstring''' __lowerCAmelCase = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING _lowerCamelCase : Tuple = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base' ) class __UpperCAmelCase ( _BaseAutoModelClass ): '''simple docstring''' __lowerCAmelCase = 
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING _lowerCamelCase : Tuple = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc='sequence classification' ) class __UpperCAmelCase ( _BaseAutoModelClass ): '''simple docstring''' __lowerCAmelCase = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING _lowerCamelCase : Any = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering') class __UpperCAmelCase ( _BaseAutoModelClass ): '''simple docstring''' __lowerCAmelCase = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING _lowerCamelCase : str = auto_class_update( FlaxAutoModelForTokenClassification, head_doc='token classification' ) class __UpperCAmelCase ( _BaseAutoModelClass ): '''simple docstring''' __lowerCAmelCase = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING _lowerCamelCase : Tuple = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice') class __UpperCAmelCase ( _BaseAutoModelClass ): '''simple docstring''' __lowerCAmelCase = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING _lowerCamelCase : List[Any] = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction' ) class __UpperCAmelCase ( _BaseAutoModelClass ): '''simple docstring''' __lowerCAmelCase = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING _lowerCamelCase : Union[str, Any] = auto_class_update( FlaxAutoModelForImageClassification, head_doc='image classification' ) class __UpperCAmelCase ( _BaseAutoModelClass ): '''simple docstring''' __lowerCAmelCase = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING _lowerCamelCase : Optional[int] = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling') class __UpperCAmelCase ( _BaseAutoModelClass ): '''simple docstring''' __lowerCAmelCase = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING _lowerCamelCase : Optional[int] = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling' )
337
'''simple docstring'''

import os


def __a() -> str:
    """Sum the integers listed one-per-line in ``num.txt`` (located next to
    this module) and return the first ten digits of the sum as a string.

    Returns:
        A string of the ten leading digits of the total.

    Raises:
        FileNotFoundError: if ``num.txt`` does not exist beside this file.
    """
    # NOTE(review): the mangled original used the undefined name
    # ``UpperCAmelCase`` where ``__file__`` and the loop variable belonged;
    # both are restored here.
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    # The original guard called an undefined ``solution()``; the function
    # defined in this file is ``__a``.
    print(__a())
337
1
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetrImageProcessor class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : Any , __UpperCAmelCase : Tuple , __UpperCAmelCase : Tuple=7 , __UpperCAmelCase : List[str]=3 , __UpperCAmelCase : Any=30 , __UpperCAmelCase : List[str]=400 , __UpperCAmelCase : Optional[Any]=True , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : Optional[Any]=True , __UpperCAmelCase : Union[str, Any]=1 / 255 , __UpperCAmelCase : str=True , __UpperCAmelCase : Dict=[0.5, 0.5, 0.5] , __UpperCAmelCase : List[str]=[0.5, 0.5, 0.5] , __UpperCAmelCase : List[Any]=True , ): '''simple docstring''' _A = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333} _A = parent _A = batch_size _A = num_channels _A = min_resolution _A = max_resolution _A = do_resize _A = size _A = do_rescale _A = rescale_factor _A = do_normalize _A = image_mean _A = image_std _A = do_pad def lowerCAmelCase ( self : Dict ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_pad": self.do_pad, } def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int=False ): '''simple docstring''' if not batched: _A = image_inputs[0] if isinstance(__UpperCAmelCase , Image.Image ): _A , _A = image.size else: _A , _A = image.shape[1], image.shape[2] if w < h: _A = int(self.size["shortest_edge"] * h / w ) _A = 
self.size["shortest_edge"] elif w > h: _A = self.size["shortest_edge"] _A = int(self.size["shortest_edge"] * w / h ) else: _A = self.size["shortest_edge"] _A = self.size["shortest_edge"] else: _A = [] for image in image_inputs: _A , _A = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) _A = max(__UpperCAmelCase , key=lambda __UpperCAmelCase : item[0] )[0] _A = max(__UpperCAmelCase , key=lambda __UpperCAmelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class _UpperCAmelCase ( snake_case_ , unittest.TestCase ): """simple docstring""" snake_case = DetrImageProcessor if is_vision_available() else None def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' _A = DetrImageProcessingTester(self ) @property def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase ( self : str ): '''simple docstring''' _A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__UpperCAmelCase , "image_mean" ) ) self.assertTrue(hasattr(__UpperCAmelCase , "image_std" ) ) self.assertTrue(hasattr(__UpperCAmelCase , "do_normalize" ) ) self.assertTrue(hasattr(__UpperCAmelCase , "do_rescale" ) ) self.assertTrue(hasattr(__UpperCAmelCase , "rescale_factor" ) ) self.assertTrue(hasattr(__UpperCAmelCase , "do_resize" ) ) self.assertTrue(hasattr(__UpperCAmelCase , "size" ) ) self.assertTrue(hasattr(__UpperCAmelCase , "do_pad" ) ) def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} ) self.assertEqual(image_processor.do_pad , __UpperCAmelCase ) _A = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__UpperCAmelCase ) self.assertEqual(image_processor.size , 
{"shortest_edge": 42, "longest_edge": 84} ) self.assertEqual(image_processor.do_pad , __UpperCAmelCase ) def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' pass def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' _A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(__UpperCAmelCase , Image.Image ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__UpperCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _A , _A = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase ) _A = image_processing(__UpperCAmelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' _A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(__UpperCAmelCase , np.ndarray ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__UpperCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _A = image_processing(__UpperCAmelCase , return_tensors="pt" ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__UpperCAmelCase , 
batched=__UpperCAmelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowerCAmelCase ( self : Any ): '''simple docstring''' _A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(__UpperCAmelCase , torch.Tensor ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__UpperCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _A = image_processing(__UpperCAmelCase , return_tensors="pt" ).pixel_values _A , _A = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def lowerCAmelCase ( self : List[str] ): '''simple docstring''' _A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f: _A = json.loads(f.read() ) _A = {"image_id": 39769, "annotations": target} # encode them _A = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50" ) _A = image_processing(images=__UpperCAmelCase , annotations=__UpperCAmelCase , return_tensors="pt" ) # verify pixel values _A = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding["pixel_values"].shape , __UpperCAmelCase ) _A = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __UpperCAmelCase , atol=1E-4 ) ) # verify area _A = 
torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __UpperCAmelCase ) ) # verify boxes _A = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , __UpperCAmelCase ) _A = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __UpperCAmelCase , atol=1E-3 ) ) # verify image_id _A = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __UpperCAmelCase ) ) # verify is_crowd _A = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __UpperCAmelCase ) ) # verify class_labels _A = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __UpperCAmelCase ) ) # verify orig_size _A = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __UpperCAmelCase ) ) # verify size _A = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __UpperCAmelCase ) ) @slow def lowerCAmelCase ( self : List[str] ): '''simple docstring''' _A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f: _A = json.loads(f.read() ) _A = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target} _A = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" ) # encode them _A = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic" ) _A = image_processing(images=__UpperCAmelCase , annotations=__UpperCAmelCase , masks_path=__UpperCAmelCase , return_tensors="pt" ) # verify pixel values _A = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding["pixel_values"].shape , __UpperCAmelCase ) _A = torch.tensor([0.2796, 0.3138, 0.3481] ) 
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __UpperCAmelCase , atol=1E-4 ) ) # verify area _A = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __UpperCAmelCase ) ) # verify boxes _A = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , __UpperCAmelCase ) _A = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __UpperCAmelCase , atol=1E-3 ) ) # verify image_id _A = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __UpperCAmelCase ) ) # verify is_crowd _A = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __UpperCAmelCase ) ) # verify class_labels _A = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __UpperCAmelCase ) ) # verify masks _A = 822873 self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __UpperCAmelCase ) # verify orig_size _A = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __UpperCAmelCase ) ) # verify size _A = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __UpperCAmelCase ) )
79
'''simple docstring'''

from ...configuration_utils import PretrainedConfig
from ...utils import logging


# NOTE(review): both module-level names were mangled to ``lowerCamelCase_``,
# so the second assignment clobbers the logger; kept as-is because other
# (unseen) code may reference the mangled name.
lowerCamelCase_ = logging.get_logger(__name__)

lowerCamelCase_ = {
    '''google/canine-s''': '''https://huggingface.co/google/canine-s/resolve/main/config.json''',
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class _UpperCAmelCase(PretrainedConfig):
    """Configuration class for CANINE character-level transformer models.

    Stores the hyper-parameters used to instantiate a CANINE model: the
    standard transformer sizes plus the character-hashing / down-sampling
    settings specific to CANINE.
    """

    # Model-type identifier used by the auto classes.
    snake_case = '''canine'''

    def __init__(
        self,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 16384,
        type_vocab_size: int = 16,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        pad_token_id: int = 0,
        bos_token_id: int = 0xE000,
        eos_token_id: int = 0xE001,
        downsampling_rate: int = 4,
        upsampling_kernel_size: int = 4,
        num_hash_functions: int = 8,
        num_hash_buckets: int = 16384,
        local_transformer_stride: int = 128,
        **kwargs,
    ):
        """Build the config.

        NOTE(review): the mangled original declared every parameter as
        ``__UpperCAmelCase`` (duplicate argument names — a SyntaxError),
        subclassed the undefined name ``snake_case_``, and assigned each
        value to a throwaway local ``_A`` instead of an attribute. The
        parameter names are restored from the right-hand sides of the body,
        the base class from the file's own import, and the attribute names
        from the standard ``self.<param> = <param>`` config pattern —
        confirm against the canonical CanineConfig if available.
        """
        # Special-token ids are forwarded to the PretrainedConfig base.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
79
1
"""simple docstring""" import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCAmelCase__ : def __init__( self : str , _lowerCamelCase : List[Any] , _lowerCamelCase : Tuple=13 , _lowerCamelCase : int=32 , _lowerCamelCase : List[str]=3 , _lowerCamelCase : List[str]=4 , _lowerCamelCase : Optional[int]=[10, 20, 30, 40] , _lowerCamelCase : Dict=[2, 2, 3, 2] , _lowerCamelCase : Dict=True , _lowerCamelCase : Tuple=True , _lowerCamelCase : Tuple=37 , _lowerCamelCase : Optional[Any]="gelu" , _lowerCamelCase : Optional[Any]=10 , _lowerCamelCase : Any=0.0_2 , _lowerCamelCase : Optional[Any]=["stage2", "stage3", "stage4"] , _lowerCamelCase : Any=[2, 3, 4] , _lowerCamelCase : Any=None , ): _snake_case = parent _snake_case = batch_size _snake_case = image_size _snake_case = num_channels _snake_case = num_stages _snake_case = hidden_sizes _snake_case = depths _snake_case = is_training _snake_case = use_labels _snake_case = intermediate_size _snake_case = hidden_act _snake_case = num_labels _snake_case = initializer_range _snake_case = out_features _snake_case = out_indices _snake_case = scope def lowercase ( self : 
Dict ): _snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case = None if self.use_labels: _snake_case = ids_tensor([self.batch_size] , self.num_labels ) _snake_case = self.get_config() return config, pixel_values, labels def lowercase ( self : str ): return ConvNextVaConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def lowercase ( self : Tuple , _lowerCamelCase : Tuple , _lowerCamelCase : int , _lowerCamelCase : List[str] ): _snake_case = ConvNextVaModel(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() _snake_case = model(_lowerCamelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowercase ( self : Dict , _lowerCamelCase : List[str] , _lowerCamelCase : Tuple , _lowerCamelCase : Union[str, Any] ): _snake_case = ConvNextVaForImageClassification(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() _snake_case = model(_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase ( self : Optional[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : List[Any] , _lowerCamelCase : Tuple ): _snake_case = ConvNextVaBackbone(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() _snake_case = model(_lowerCamelCase ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels 
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None _snake_case = None _snake_case = ConvNextVaBackbone(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() _snake_case = model(_lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def lowercase ( self : str ): _snake_case = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case = config_and_inputs _snake_case = {'''pixel_values''': pixel_values} return config, inputs_dict def lowercase ( self : int ): _snake_case = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case = config_and_inputs _snake_case = {'''pixel_values''': pixel_values, '''labels''': labels} return config, inputs_dict @require_torch class lowerCAmelCase__ ( A_ , A_ , unittest.TestCase ): __a = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) __a = ( {"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification} if is_torch_available() else {} ) __a = False __a = False __a = False __a = False __a = False def lowercase ( self : str ): _snake_case = ConvNextVaModelTester(self ) _snake_case = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 ) def lowercase ( self : List[str] ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() 
self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowercase ( self : Dict ): return @unittest.skip(reason='''ConvNextV2 does not use inputs_embeds''' ) def lowercase ( self : Dict ): pass @unittest.skip(reason='''ConvNextV2 does not support input and output embeddings''' ) def lowercase ( self : int ): pass @unittest.skip(reason='''ConvNextV2 does not use feedforward chunking''' ) def lowercase ( self : int ): pass def lowercase ( self : Union[str, Any] ): if not self.model_tester.is_training: return for model_class in self.all_model_classes: _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_with_labels() _snake_case = True if model_class.__name__ in [ *get_values(_lowerCamelCase ), *get_values(_lowerCamelCase ), ]: continue _snake_case = model_class(_lowerCamelCase ) model.to(_lowerCamelCase ) model.train() _snake_case = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase ) _snake_case = model(**_lowerCamelCase ).loss loss.backward() def lowercase ( self : Dict ): if not self.model_tester.is_training: return for model_class in self.all_model_classes: _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_with_labels() _snake_case = False _snake_case = True if ( model_class.__name__ in [*get_values(_lowerCamelCase ), *get_values(_lowerCamelCase )] or not model_class.supports_gradient_checkpointing ): continue _snake_case = model_class(_lowerCamelCase ) model.to(_lowerCamelCase ) model.gradient_checkpointing_enable() model.train() _snake_case = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase ) _snake_case = model(**_lowerCamelCase ).loss loss.backward() def lowercase ( self : Optional[Any] ): _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: 
_snake_case = model_class(_lowerCamelCase ) _snake_case = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case = [*signature.parameters.keys()] _snake_case = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _lowerCamelCase ) def lowercase ( self : Optional[Any] ): _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def lowercase ( self : Optional[int] ): def check_hidden_states_output(_lowerCamelCase : Any , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[int] ): _snake_case = model_class(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() with torch.no_grad(): _snake_case = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) ) _snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _snake_case = self.model_tester.num_stages self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _snake_case = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def lowercase ( self : List[str] ): _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase ) @slow def lowercase ( self : str ): for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case = 
ConvNextVaModel.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) def _UpperCAmelCase ( ) -> Optional[Any]: _snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class lowerCAmelCase__ ( unittest.TestCase ): @cached_property def lowercase ( self : List[Any] ): return AutoImageProcessor.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ) if is_vision_available() else None @slow def lowercase ( self : Optional[Any] ): _snake_case = ConvNextVaForImageClassification.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ).to(_lowerCamelCase ) _snake_case = self.default_image_processor _snake_case = prepare_img() _snake_case = preprocessor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase ) # forward pass with torch.no_grad(): _snake_case = model(**_lowerCamelCase ) # verify the logits _snake_case = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _lowerCamelCase ) _snake_case = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6] ).to(_lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 ) )
40
"""simple docstring""" import gc import unittest from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline from transformers.pipelines import PipelineException from transformers.testing_utils import ( is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_gpu, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class lowerCAmelCase__ ( unittest.TestCase ): __a = MODEL_FOR_MASKED_LM_MAPPING __a = TF_MODEL_FOR_MASKED_LM_MAPPING def lowercase ( self : Optional[int] ): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() if is_torch_available(): import torch torch.cuda.empty_cache() @require_tf def lowercase ( self : Tuple ): _snake_case = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , top_k=2 , framework='''tf''' ) _snake_case = unmasker('''My name is <mask>''' ) self.assertEqual( nested_simplify(_lowerCamelCase , decimals=6 ) , [ {'''sequence''': '''My name is grouped''', '''score''': 2.1e-05, '''token''': 38015, '''token_str''': ''' grouped'''}, {'''sequence''': '''My name is accuser''', '''score''': 2.1e-05, '''token''': 25506, '''token_str''': ''' accuser'''}, ] , ) _snake_case = unmasker('''The largest city in France is <mask>''' ) self.assertEqual( nested_simplify(_lowerCamelCase , decimals=6 ) , [ { '''sequence''': '''The largest city in France is grouped''', '''score''': 2.1e-05, '''token''': 38015, '''token_str''': ''' grouped''', }, { '''sequence''': '''The largest city in France is accuser''', '''score''': 2.1e-05, '''token''': 25506, '''token_str''': ''' accuser''', }, ] , ) _snake_case = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 ) self.assertEqual( nested_simplify(_lowerCamelCase , decimals=6 ) , [ {'''sequence''': '''My name is Clara''', '''score''': 2e-05, '''token''': 13606, '''token_str''': ''' Clara'''}, 
{'''sequence''': '''My name is Patrick''', '''score''': 2e-05, '''token''': 3499, '''token_str''': ''' Patrick'''}, {'''sequence''': '''My name is Te''', '''score''': 1.9e-05, '''token''': 2941, '''token_str''': ''' Te'''}, ] , ) @require_torch def lowercase ( self : List[str] ): _snake_case = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , top_k=2 , framework='''pt''' ) _snake_case = unmasker('''My name is <mask>''' ) self.assertEqual( nested_simplify(_lowerCamelCase , decimals=6 ) , [ {'''sequence''': '''My name is Maul''', '''score''': 2.2e-05, '''token''': 35676, '''token_str''': ''' Maul'''}, {'''sequence''': '''My name isELS''', '''score''': 2.2e-05, '''token''': 16416, '''token_str''': '''ELS'''}, ] , ) _snake_case = unmasker('''The largest city in France is <mask>''' ) self.assertEqual( nested_simplify(_lowerCamelCase , decimals=6 ) , [ { '''sequence''': '''The largest city in France is Maul''', '''score''': 2.2e-05, '''token''': 35676, '''token_str''': ''' Maul''', }, {'''sequence''': '''The largest city in France isELS''', '''score''': 2.2e-05, '''token''': 16416, '''token_str''': '''ELS'''}, ] , ) _snake_case = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 ) self.assertEqual( nested_simplify(_lowerCamelCase , decimals=6 ) , [ {'''sequence''': '''My name is Patrick''', '''score''': 2.1e-05, '''token''': 3499, '''token_str''': ''' Patrick'''}, {'''sequence''': '''My name is Te''', '''score''': 2e-05, '''token''': 2941, '''token_str''': ''' Te'''}, {'''sequence''': '''My name is Clara''', '''score''': 2e-05, '''token''': 13606, '''token_str''': ''' Clara'''}, ] , ) _snake_case = unmasker('''My name is <mask> <mask>''' , top_k=2 ) self.assertEqual( nested_simplify(_lowerCamelCase , decimals=6 ) , [ [ { '''score''': 2.2e-05, '''token''': 35676, '''token_str''': ''' Maul''', '''sequence''': '''<s>My name is Maul<mask></s>''', }, {'''score''': 2.2e-05, '''token''': 16416, 
'''token_str''': '''ELS''', '''sequence''': '''<s>My name isELS<mask></s>'''}, ], [ { '''score''': 2.2e-05, '''token''': 35676, '''token_str''': ''' Maul''', '''sequence''': '''<s>My name is<mask> Maul</s>''', }, {'''score''': 2.2e-05, '''token''': 16416, '''token_str''': '''ELS''', '''sequence''': '''<s>My name is<mask>ELS</s>'''}, ], ] , ) @require_torch_gpu def lowercase ( self : Optional[Any] ): _snake_case = pipeline('''fill-mask''' , model='''hf-internal-testing/tiny-random-distilbert''' , device=0 , framework='''pt''' ) # convert model to fp16 pipe.model.half() _snake_case = pipe('''Paris is the [MASK] of France.''' ) # We actually don't care about the result, we just want to make sure # it works, meaning the float16 tensor got casted back to float32 # for postprocessing. self.assertIsInstance(_lowerCamelCase , _lowerCamelCase ) @slow @require_torch def lowercase ( self : Dict ): _snake_case = pipeline(task='''fill-mask''' , model='''distilroberta-base''' , top_k=2 , framework='''pt''' ) self.run_large_test(_lowerCamelCase ) @slow @require_tf def lowercase ( self : Tuple ): _snake_case = pipeline(task='''fill-mask''' , model='''distilroberta-base''' , top_k=2 , framework='''tf''' ) self.run_large_test(_lowerCamelCase ) def lowercase ( self : Tuple , _lowerCamelCase : Optional[int] ): _snake_case = unmasker('''My name is <mask>''' ) self.assertEqual( nested_simplify(_lowerCamelCase ) , [ {'''sequence''': '''My name is John''', '''score''': 0.0_0_8, '''token''': 610, '''token_str''': ''' John'''}, {'''sequence''': '''My name is Chris''', '''score''': 0.0_0_7, '''token''': 1573, '''token_str''': ''' Chris'''}, ] , ) _snake_case = unmasker('''The largest city in France is <mask>''' ) self.assertEqual( nested_simplify(_lowerCamelCase ) , [ { '''sequence''': '''The largest city in France is Paris''', '''score''': 0.2_5_1, '''token''': 2201, '''token_str''': ''' Paris''', }, { '''sequence''': '''The largest city in France is Lyon''', '''score''': 0.2_1_4, 
'''token''': 12790, '''token_str''': ''' Lyon''', }, ] , ) _snake_case = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 ) self.assertEqual( nested_simplify(_lowerCamelCase ) , [ {'''sequence''': '''My name is Patrick''', '''score''': 0.0_0_5, '''token''': 3499, '''token_str''': ''' Patrick'''}, {'''sequence''': '''My name is Clara''', '''score''': 0.0_0_0, '''token''': 13606, '''token_str''': ''' Clara'''}, {'''sequence''': '''My name is Te''', '''score''': 0.0_0_0, '''token''': 2941, '''token_str''': ''' Te'''}, ] , ) @require_torch def lowercase ( self : str ): _snake_case = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , framework='''pt''' ) _snake_case = None _snake_case = None self.run_pipeline_test(_lowerCamelCase , [] ) @require_tf def lowercase ( self : Any ): _snake_case = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , framework='''tf''' ) _snake_case = None _snake_case = None self.run_pipeline_test(_lowerCamelCase , [] ) def lowercase ( self : int , _lowerCamelCase : int , _lowerCamelCase : Dict , _lowerCamelCase : List[str] ): if tokenizer is None or tokenizer.mask_token_id is None: self.skipTest('''The provided tokenizer has no mask token, (probably reformer or wav2vec2)''' ) _snake_case = FillMaskPipeline(model=_lowerCamelCase , tokenizer=_lowerCamelCase ) _snake_case = [ f'''This is another {tokenizer.mask_token} test''', ] return fill_masker, examples def lowercase ( self : Optional[int] , _lowerCamelCase : Tuple , _lowerCamelCase : List[str] ): _snake_case = fill_masker.tokenizer _snake_case = fill_masker.model _snake_case = fill_masker( f'''This is a {tokenizer.mask_token}''' , ) self.assertEqual( _lowerCamelCase , [ {'''sequence''': ANY(_lowerCamelCase ), '''score''': ANY(_lowerCamelCase ), '''token''': ANY(_lowerCamelCase ), '''token_str''': ANY(_lowerCamelCase )}, {'''sequence''': ANY(_lowerCamelCase ), '''score''': 
ANY(_lowerCamelCase ), '''token''': ANY(_lowerCamelCase ), '''token_str''': ANY(_lowerCamelCase )}, {'''sequence''': ANY(_lowerCamelCase ), '''score''': ANY(_lowerCamelCase ), '''token''': ANY(_lowerCamelCase ), '''token_str''': ANY(_lowerCamelCase )}, {'''sequence''': ANY(_lowerCamelCase ), '''score''': ANY(_lowerCamelCase ), '''token''': ANY(_lowerCamelCase ), '''token_str''': ANY(_lowerCamelCase )}, {'''sequence''': ANY(_lowerCamelCase ), '''score''': ANY(_lowerCamelCase ), '''token''': ANY(_lowerCamelCase ), '''token_str''': ANY(_lowerCamelCase )}, ] , ) _snake_case = fill_masker([f'''This is a {tokenizer.mask_token}'''] ) self.assertEqual( _lowerCamelCase , [ {'''sequence''': ANY(_lowerCamelCase ), '''score''': ANY(_lowerCamelCase ), '''token''': ANY(_lowerCamelCase ), '''token_str''': ANY(_lowerCamelCase )}, {'''sequence''': ANY(_lowerCamelCase ), '''score''': ANY(_lowerCamelCase ), '''token''': ANY(_lowerCamelCase ), '''token_str''': ANY(_lowerCamelCase )}, {'''sequence''': ANY(_lowerCamelCase ), '''score''': ANY(_lowerCamelCase ), '''token''': ANY(_lowerCamelCase ), '''token_str''': ANY(_lowerCamelCase )}, {'''sequence''': ANY(_lowerCamelCase ), '''score''': ANY(_lowerCamelCase ), '''token''': ANY(_lowerCamelCase ), '''token_str''': ANY(_lowerCamelCase )}, {'''sequence''': ANY(_lowerCamelCase ), '''score''': ANY(_lowerCamelCase ), '''token''': ANY(_lowerCamelCase ), '''token_str''': ANY(_lowerCamelCase )}, ] , ) _snake_case = fill_masker([f'''This is a {tokenizer.mask_token}''', f'''Another {tokenizer.mask_token} great test.'''] ) self.assertEqual( _lowerCamelCase , [ [ {'''sequence''': ANY(_lowerCamelCase ), '''score''': ANY(_lowerCamelCase ), '''token''': ANY(_lowerCamelCase ), '''token_str''': ANY(_lowerCamelCase )}, {'''sequence''': ANY(_lowerCamelCase ), '''score''': ANY(_lowerCamelCase ), '''token''': ANY(_lowerCamelCase ), '''token_str''': ANY(_lowerCamelCase )}, {'''sequence''': ANY(_lowerCamelCase ), '''score''': ANY(_lowerCamelCase ), '''token''': 
ANY(_lowerCamelCase ), '''token_str''': ANY(_lowerCamelCase )}, {'''sequence''': ANY(_lowerCamelCase ), '''score''': ANY(_lowerCamelCase ), '''token''': ANY(_lowerCamelCase ), '''token_str''': ANY(_lowerCamelCase )}, {'''sequence''': ANY(_lowerCamelCase ), '''score''': ANY(_lowerCamelCase ), '''token''': ANY(_lowerCamelCase ), '''token_str''': ANY(_lowerCamelCase )}, ], [ {'''sequence''': ANY(_lowerCamelCase ), '''score''': ANY(_lowerCamelCase ), '''token''': ANY(_lowerCamelCase ), '''token_str''': ANY(_lowerCamelCase )}, {'''sequence''': ANY(_lowerCamelCase ), '''score''': ANY(_lowerCamelCase ), '''token''': ANY(_lowerCamelCase ), '''token_str''': ANY(_lowerCamelCase )}, {'''sequence''': ANY(_lowerCamelCase ), '''score''': ANY(_lowerCamelCase ), '''token''': ANY(_lowerCamelCase ), '''token_str''': ANY(_lowerCamelCase )}, {'''sequence''': ANY(_lowerCamelCase ), '''score''': ANY(_lowerCamelCase ), '''token''': ANY(_lowerCamelCase ), '''token_str''': ANY(_lowerCamelCase )}, {'''sequence''': ANY(_lowerCamelCase ), '''score''': ANY(_lowerCamelCase ), '''token''': ANY(_lowerCamelCase ), '''token_str''': ANY(_lowerCamelCase )}, ], ] , ) with self.assertRaises(_lowerCamelCase ): fill_masker([None] ) # No mask_token is not supported with self.assertRaises(_lowerCamelCase ): fill_masker('''This is''' ) self.run_test_top_k(_lowerCamelCase , _lowerCamelCase ) self.run_test_targets(_lowerCamelCase , _lowerCamelCase ) self.run_test_top_k_targets(_lowerCamelCase , _lowerCamelCase ) self.fill_mask_with_duplicate_targets_and_top_k(_lowerCamelCase , _lowerCamelCase ) self.fill_mask_with_multiple_masks(_lowerCamelCase , _lowerCamelCase ) def lowercase ( self : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[int] ): _snake_case = tokenizer.get_vocab() _snake_case = sorted(vocab.keys() )[:2] # Pipeline argument _snake_case = FillMaskPipeline(model=_lowerCamelCase , tokenizer=_lowerCamelCase , targets=_lowerCamelCase ) _snake_case = fill_masker(f'''This is a 
{tokenizer.mask_token}''' ) self.assertEqual( _lowerCamelCase , [ {'''sequence''': ANY(_lowerCamelCase ), '''score''': ANY(_lowerCamelCase ), '''token''': ANY(_lowerCamelCase ), '''token_str''': ANY(_lowerCamelCase )}, {'''sequence''': ANY(_lowerCamelCase ), '''score''': ANY(_lowerCamelCase ), '''token''': ANY(_lowerCamelCase ), '''token_str''': ANY(_lowerCamelCase )}, ] , ) _snake_case = {vocab[el] for el in targets} self.assertEqual({el['''token'''] for el in outputs} , _lowerCamelCase ) _snake_case = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el['''token_str'''] for el in outputs} , set(_lowerCamelCase ) ) # Call argument _snake_case = FillMaskPipeline(model=_lowerCamelCase , tokenizer=_lowerCamelCase ) _snake_case = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=_lowerCamelCase ) self.assertEqual( _lowerCamelCase , [ {'''sequence''': ANY(_lowerCamelCase ), '''score''': ANY(_lowerCamelCase ), '''token''': ANY(_lowerCamelCase ), '''token_str''': ANY(_lowerCamelCase )}, {'''sequence''': ANY(_lowerCamelCase ), '''score''': ANY(_lowerCamelCase ), '''token''': ANY(_lowerCamelCase ), '''token_str''': ANY(_lowerCamelCase )}, ] , ) _snake_case = {vocab[el] for el in targets} self.assertEqual({el['''token'''] for el in outputs} , _lowerCamelCase ) _snake_case = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el['''token_str'''] for el in outputs} , set(_lowerCamelCase ) ) # Score equivalence _snake_case = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=_lowerCamelCase ) _snake_case = [top_mask['''token_str'''] for top_mask in outputs] _snake_case = [top_mask['''score'''] for top_mask in outputs] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(_lowerCamelCase ) == set(_lowerCamelCase ): _snake_case = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=_lowerCamelCase ) _snake_case = [top_mask['''score'''] for top_mask in unmasked_targets] self.assertEqual(nested_simplify(_lowerCamelCase ) , nested_simplify(_lowerCamelCase ) ) # Raises with invalid with self.assertRaises(_lowerCamelCase ): _snake_case = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=[] ) # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised if "" not in tokenizer.get_vocab(): with self.assertRaises(_lowerCamelCase ): _snake_case = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=[''''''] ) with self.assertRaises(_lowerCamelCase ): _snake_case = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets='''''' ) def lowercase ( self : Optional[int] , _lowerCamelCase : Any , _lowerCamelCase : Tuple ): _snake_case = FillMaskPipeline(model=_lowerCamelCase , tokenizer=_lowerCamelCase , top_k=2 ) _snake_case = fill_masker(f'''This is a {tokenizer.mask_token}''' ) self.assertEqual( _lowerCamelCase , [ {'''sequence''': ANY(_lowerCamelCase ), '''score''': ANY(_lowerCamelCase ), '''token''': ANY(_lowerCamelCase ), '''token_str''': ANY(_lowerCamelCase )}, {'''sequence''': ANY(_lowerCamelCase ), '''score''': ANY(_lowerCamelCase ), '''token''': ANY(_lowerCamelCase ), '''token_str''': ANY(_lowerCamelCase )}, ] , ) _snake_case = FillMaskPipeline(model=_lowerCamelCase , tokenizer=_lowerCamelCase ) _snake_case = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=2 ) self.assertEqual( _lowerCamelCase , [ {'''sequence''': ANY(_lowerCamelCase ), '''score''': ANY(_lowerCamelCase ), '''token''': ANY(_lowerCamelCase ), '''token_str''': ANY(_lowerCamelCase )}, {'''sequence''': ANY(_lowerCamelCase ), '''score''': ANY(_lowerCamelCase ), '''token''': ANY(_lowerCamelCase ), '''token_str''': ANY(_lowerCamelCase )}, ] , ) self.assertEqual(nested_simplify(_lowerCamelCase ) 
, nested_simplify(_lowerCamelCase ) ) def lowercase ( self : str , _lowerCamelCase : str , _lowerCamelCase : List[str] ): _snake_case = tokenizer.get_vocab() _snake_case = FillMaskPipeline(model=_lowerCamelCase , tokenizer=_lowerCamelCase ) # top_k=2, ntargets=3 _snake_case = sorted(vocab.keys() )[:3] _snake_case = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=2 , targets=_lowerCamelCase ) # If we use the most probably targets, and filter differently, we should still # have the same results _snake_case = [el['''token_str'''] for el in sorted(_lowerCamelCase , key=lambda _lowerCamelCase : x["score"] , reverse=_lowerCamelCase )] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. if set(_lowerCamelCase ).issubset(_lowerCamelCase ): _snake_case = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=3 , targets=_lowerCamelCase ) # They should yield exactly the same result self.assertEqual(nested_simplify(_lowerCamelCase ) , nested_simplify(_lowerCamelCase ) ) def lowercase ( self : Union[str, Any] , _lowerCamelCase : Tuple , _lowerCamelCase : int ): _snake_case = FillMaskPipeline(model=_lowerCamelCase , tokenizer=_lowerCamelCase ) _snake_case = tokenizer.get_vocab() # String duplicates + id duplicates _snake_case = sorted(vocab.keys() )[:3] _snake_case = [targets[0], targets[1], targets[0], targets[2], targets[1]] _snake_case = fill_masker(f'''My name is {tokenizer.mask_token}''' , targets=_lowerCamelCase , top_k=10 ) # The target list contains duplicates, so we can't output more # than them self.assertEqual(len(_lowerCamelCase ) , 3 ) def lowercase ( self : int , _lowerCamelCase : Optional[int] , _lowerCamelCase : Tuple ): _snake_case = FillMaskPipeline(model=_lowerCamelCase , tokenizer=_lowerCamelCase ) _snake_case = fill_masker( f'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2 ) self.assertEqual( _lowerCamelCase , [ [ {'''sequence''': 
ANY(_lowerCamelCase ), '''score''': ANY(_lowerCamelCase ), '''token''': ANY(_lowerCamelCase ), '''token_str''': ANY(_lowerCamelCase )}, {'''sequence''': ANY(_lowerCamelCase ), '''score''': ANY(_lowerCamelCase ), '''token''': ANY(_lowerCamelCase ), '''token_str''': ANY(_lowerCamelCase )}, ], [ {'''sequence''': ANY(_lowerCamelCase ), '''score''': ANY(_lowerCamelCase ), '''token''': ANY(_lowerCamelCase ), '''token_str''': ANY(_lowerCamelCase )}, {'''sequence''': ANY(_lowerCamelCase ), '''score''': ANY(_lowerCamelCase ), '''token''': ANY(_lowerCamelCase ), '''token_str''': ANY(_lowerCamelCase )}, ], [ {'''sequence''': ANY(_lowerCamelCase ), '''score''': ANY(_lowerCamelCase ), '''token''': ANY(_lowerCamelCase ), '''token_str''': ANY(_lowerCamelCase )}, {'''sequence''': ANY(_lowerCamelCase ), '''score''': ANY(_lowerCamelCase ), '''token''': ANY(_lowerCamelCase ), '''token_str''': ANY(_lowerCamelCase )}, ], ] , )
40
1
"""simple docstring""" from __future__ import annotations def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ ): UpperCAmelCase = list(range(len(lowercase_ ) ) ) UpperCAmelCase = [v / w for v, w in zip(lowercase_ , lowercase_ )] index.sort(key=lambda lowercase_ : ratio[i] , reverse=lowercase_ ) UpperCAmelCase = 0 UpperCAmelCase = [0] * len(lowercase_ ) for i in index: if weight[i] <= capacity: UpperCAmelCase = 1 max_value += value[i] capacity -= weight[i] else: UpperCAmelCase = capacity / weight[i] max_value += value[i] * capacity / weight[i] break return max_value, fractions if __name__ == "__main__": import doctest doctest.testmod()
78
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging snake_case_ = logging.get_logger(__name__) snake_case_ = { """microsoft/beit-base-patch16-224-pt22k""": ( """https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json""" ), # See all BEiT models at https://huggingface.co/models?filter=beit } class A_ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __UpperCamelCase = """beit""" def __init__( self :List[str] , lowercase_ :List[Any]=81_92 , lowercase_ :str=7_68 , lowercase_ :List[str]=12 , lowercase_ :Optional[int]=12 , lowercase_ :Dict=30_72 , lowercase_ :Tuple="gelu" , lowercase_ :Any=0.0 , lowercase_ :Optional[int]=0.0 , lowercase_ :Dict=0.02 , lowercase_ :int=1E-12 , lowercase_ :List[Any]=2_24 , lowercase_ :Dict=16 , lowercase_ :List[Any]=3 , lowercase_ :List[str]=False , lowercase_ :Optional[Any]=False , lowercase_ :Optional[Any]=False , lowercase_ :Optional[Any]=False , lowercase_ :Union[str, Any]=0.1 , lowercase_ :str=0.1 , lowercase_ :str=True , lowercase_ :List[str]=[3, 5, 7, 11] , lowercase_ :Optional[int]=[1, 2, 3, 6] , lowercase_ :str=True , lowercase_ :int=0.4 , lowercase_ :Union[str, Any]=2_56 , lowercase_ :int=1 , lowercase_ :Tuple=False , lowercase_ :Optional[int]=2_55 , **lowercase_ :str , ) -> Any: super().__init__(**lowercase_ ) UpperCAmelCase = vocab_size UpperCAmelCase = hidden_size UpperCAmelCase = num_hidden_layers UpperCAmelCase = num_attention_heads UpperCAmelCase = intermediate_size UpperCAmelCase = hidden_act UpperCAmelCase = hidden_dropout_prob UpperCAmelCase = attention_probs_dropout_prob UpperCAmelCase = initializer_range UpperCAmelCase = layer_norm_eps UpperCAmelCase = image_size UpperCAmelCase = patch_size UpperCAmelCase = num_channels UpperCAmelCase = use_mask_token UpperCAmelCase = use_absolute_position_embeddings UpperCAmelCase = 
use_relative_position_bias UpperCAmelCase = use_shared_relative_position_bias UpperCAmelCase = layer_scale_init_value UpperCAmelCase = drop_path_rate UpperCAmelCase = use_mean_pooling # decode head attributes (semantic segmentation) UpperCAmelCase = out_indices UpperCAmelCase = pool_scales # auxiliary head attributes (semantic segmentation) UpperCAmelCase = use_auxiliary_head UpperCAmelCase = auxiliary_loss_weight UpperCAmelCase = auxiliary_channels UpperCAmelCase = auxiliary_num_convs UpperCAmelCase = auxiliary_concat_input UpperCAmelCase = semantic_loss_ignore_index class A_ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __UpperCamelCase = version.parse("""1.11""" ) @property def UpperCAmelCase__ ( self :Dict ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def UpperCAmelCase__ ( self :Tuple ) -> float: return 1E-4
78
1
from .configuration_bert_masked import MaskedBertConfig from .modeling_bert_masked import ( MaskedBertForMultipleChoice, MaskedBertForQuestionAnswering, MaskedBertForSequenceClassification, MaskedBertForTokenClassification, MaskedBertModel, ) from .modules import *
355
"""Project Euler problem 493: expected number of distinct colours.

An urn holds NUM_COLOURS colours with BALLS_PER_COLOUR balls of each;
we draw `taken` balls without replacement.  By linearity of expectation,
E[#colours] = NUM_COLOURS * P(a fixed colour appears at least once).
"""

import math

# NOTE(review): previously all three constants were bound to the same
# obfuscated name `__a`, leaving NUM_COLOURS/BALLS_PER_COLOUR/NUM_BALLS
# undefined, and the __main__ guard called an undefined `solution`.
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(taken: int = 20) -> str:
    """Return the expected number of distinct colours among `taken` drawn
    balls, formatted to 9 decimal places.
    """
    total = math.comb(NUM_BALLS, taken)
    # Draws that avoid one fixed colour entirely.
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, taken)

    result = NUM_COLOURS * (1 - missing_colour / total)

    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
329
0
"""ResNet model configuration."""

from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration for a ResNet model / backbone.

    NOTE(review): the previous version assigned every constructor argument
    to a single throwaway local instead of ``self.<name>``, so the config
    stored nothing; attribute assignments are restored below.
    """

    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Reject unsupported residual-block variants early.
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        # Stage names drive the backbone feature-selection machinery.
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    """ONNX export configuration for ResNet (image input, NCHW layout)."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single pixel_values input with dynamic batch/spatial axes.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating the exported model.
        return 1e-3
9
import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def deprecate(*args, take_from=None, standard_warn=True, stacklevel=2):
    """Emit deprecation warnings and pop deprecated arguments/attributes.

    Each positional argument is a ``(attribute, version_name, message)``
    tuple (a single tuple may be passed unwrapped).  ``take_from`` is
    either a kwargs dict to pop deprecated keys from, or an object whose
    deprecated attribute is read.

    NOTE(review): the previous version's signature declared four
    parameters all named ``lowercase__`` (a SyntaxError) and every local
    binding was mangled to one placeholder; names are restored here.

    Returns:
        Nothing, the single popped value, or a tuple of popped values.

    Raises:
        ValueError: if the current library version has already reached
            ``version_name`` (the deprecation should have been removed).
        TypeError: if ``take_from`` is a dict that still contains keys
            not covered by any deprecation tuple.
    """
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    # Allow a single bare triple instead of a tuple of triples.
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        # Fail loudly once the deprecation window has closed.
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            # standard_warn=False lets the caller supply the full message.
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    # Any keys left in the dict were never declared deprecated -> caller bug.
    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
9
1
"""simple docstring""" import sys def __SCREAMING_SNAKE_CASE ( lowercase__ ): """simple docstring""" A = len(lowercase__ ) A = [[0 for x in range(lowercase__ )] for x in range(lowercase__ )] A = [[0 for x in range(lowercase__ )] for x in range(lowercase__ )] for chain_length in range(2 , lowercase__ ): for a in range(1 , n - chain_length + 1 ): A = a + chain_length - 1 A = sys.maxsize for c in range(lowercase__ , lowercase__ ): A = ( matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b] ) if cost < matrix[a][b]: A = cost A = c return matrix, sol def __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , lowercase__ ): """simple docstring""" if i == j: print("A" + str(lowercase__ ) , end=" " ) else: print("(" , end=" " ) print_optiomal_solution(lowercase__ , lowercase__ , optimal_solution[i][j] ) print_optiomal_solution(lowercase__ , optimal_solution[i][j] + 1 , lowercase__ ) print(")" , end=" " ) def __SCREAMING_SNAKE_CASE ( ): """simple docstring""" A = [30, 35, 15, 5, 10, 20, 25] A = len(lowercase__ ) # Size of matrix created from above array will be # 30*35 35*15 15*5 5*10 10*20 20*25 A , A = matrix_chain_order(lowercase__ ) print("No. of Operation required: " + str(matrix[1][n - 1] ) ) print_optiomal_solution(lowercase__ , 1 , n - 1 ) if __name__ == "__main__": main()
352
"""simple docstring""" # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import platform import numpy as np import psutil import torch from accelerate import __version__ as version from accelerate.commands.config import default_config_file, load_config_from_file from ..utils import is_npu_available, is_xpu_available def __SCREAMING_SNAKE_CASE ( lowercase__=None ): """simple docstring""" if subparsers is not None: A = subparsers.add_parser("env" ) else: A = argparse.ArgumentParser("Accelerate env command" ) parser.add_argument( "--config_file" , default=lowercase__ , help="The config file to use for the default values in the launching script." ) if subparsers is not None: parser.set_defaults(func=lowercase__ ) return parser def __SCREAMING_SNAKE_CASE ( lowercase__ ): """simple docstring""" A = torch.__version__ A = torch.cuda.is_available() A = is_xpu_available() A = is_npu_available() A = "Not found" # Get the default from the config file. 
if args.config_file is not None or os.path.isfile(lowercase__ ): A = load_config_from_file(args.config_file ).to_dict() A = { "`Accelerate` version": version, "Platform": platform.platform(), "Python version": platform.python_version(), "Numpy version": np.__version__, "PyTorch version (GPU?)": F"""{pt_version} ({pt_cuda_available})""", "PyTorch XPU available": str(lowercase__ ), "PyTorch NPU available": str(lowercase__ ), "System RAM": F"""{psutil.virtual_memory().total / 1_024 ** 3:.2f} GB""", } if pt_cuda_available: A = torch.cuda.get_device_name() print("\nCopy-and-paste the text below in your GitHub issue\n" ) print("\n".join([F"""- {prop}: {val}""" for prop, val in info.items()] ) ) print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:" ) A = ( "\n".join([F"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] ) if isinstance(lowercase__ , lowercase__ ) else F"""\t{accelerate_config}""" ) print(lowercase__ ) A = accelerate_config return info def __SCREAMING_SNAKE_CASE ( ): """simple docstring""" A = env_command_parser() A = parser.parse_args() env_command(lowercase__ ) return 0 if __name__ == "__main__": raise SystemExit(main())
57
0
import unittest

import numpy as np
import torch

from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device

from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for the unconditional DDIM pipeline."""

    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # NOTE(review): attribute name reconstructed from the mixin convention — confirm
    test_cpu_offload = False

    def get_dummy_components(self):
        """Build a tiny UNet + DDIM scheduler so tests run in seconds on CPU."""
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline kwargs; mps needs a CPU-seeded generator."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU tests against the released google/ddpm checkpoints."""

    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()

        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
2
"""Convert a fairseq mBART checkpoint on disk to a Hugging Face MBartForConditionalGeneration."""
import argparse

import torch
from torch import nn

from transformers import MBartConfig, MBartForConditionalGeneration


def remove_ignore_keys_(state_dict):
    """Drop fairseq-only keys that have no Hugging Face counterpart (mutates in place)."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """Return a bias-free Linear layer sharing the embedding's weight (tied LM head)."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    """Load a fairseq checkpoint and return an equivalent HF MBart model.

    Args:
        checkpoint_path: path to the fairseq ``model.pt``.
        hf_config_path: HF config to base the converted model on.
        finetuned: whether the checkpoint is a fine-tuned (task) checkpoint.
        mbart_50: whether the checkpoint is an mBART-50 checkpoint.
    """
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        # mBART-50 fine-tuned checkpoints use ReLU activations
        mbart_config.activation_function = "relu"

    # fairseq ties encoder/decoder embeddings through the decoder table
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
2
1
"""TER (Translation Edit Rate) metric, wrapping sacrebleu's TER implementation."""
import sacrebleu as scb
from packaging import version
from sacrebleu import TER

import datasets


_CITATION = '\\n@inproceedings{snover-etal-2006-study,\n    title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n    author = "Snover, Matthew  and\n      Dorr, Bonnie  and\n      Schwartz, Rich  and\n      Micciulla, Linnea  and\n      Makhoul, John",\n    booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n    month = aug # " 8-12",\n    year = "2006",\n    address = "Cambridge, Massachusetts, USA",\n    publisher = "Association for Machine Translation in the Americas",\n    url = "https://aclanthology.org/2006.amta-papers.25",\n    pages = "223--231",\n}\n@inproceedings{post-2018-call,\n    title = "A Call for Clarity in Reporting {BLEU} Scores",\n    author = "Post, Matt",\n    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n    month = oct,\n    year = "2018",\n    address = "Belgium, Brussels",\n    publisher = "Association for Computational Linguistics",\n    url = "https://www.aclweb.org/anthology/W18-6319",\n    pages = "186--191",\n}\n'

_DESCRIPTION = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'

_KWARGS_DESCRIPTION = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n    predictions (list of str): The system stream (a sequence of segments).\n    references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n        Only applies if `normalized = True`. Defaults to `False`.\n    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n    \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n    \'num_edits\' (int): The cumulative number of edits\n    \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n    Example 1:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?",\n        ...                     "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...             ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n    Example 2:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n    Example 3:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         normalized=True,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n    Example 4:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n    Example 5:\n        >>> predictions = ["does this sentence match??",\n        ...                    "what about this sentence?",\n        ...                    "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...             ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Ter(datasets.Metric):
    """datasets Metric wrapper around sacrebleu's TER scorer."""

    def _info(self):
        # sacrebleu's TER signature changed in 1.4.12; refuse older versions.
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        # All predictions must come with the same number of references.
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        # sacrebleu expects references transposed: one list per reference stream.
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
327
"""Fast (tokenizers-backed) tokenizer for HerBERT."""
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}


class HerbertTokenizerFast(PreTrainedTokenizerFast):
    """HerBERT fast tokenizer (BPE), backed by HuggingFace *tokenizers*."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Add HerBERT special tokens: <s> A </s> (pair: <s> A </s> B </s>)."""
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Token type ids: 0 for the first sequence (with specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Persist the underlying tokenizers model files; return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
327
1
"""Convert a microsoft/unilm WavLM checkpoint to a Hugging Face WavLMModel."""
import argparse

import torch

# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./   # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig

from transformers import WavLMConfig, WavLMModel, logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq key prefix -> HF submodule path ("*" = layer index placeholder)
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
    "self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
    "self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Assign ``value`` into ``hf_pointer`` at the dotted path ``key``/``weight_type``."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model):
    """Copy all fairseq weights into the HF model, warning about unmatched keys."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None

                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
            if not is_used:
                unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one feature-extractor conv/layer-norm weight, validating shapes."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    """Load the original WavLM checkpoint, port weights, and save the HF model."""
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model, hf_wavlm)

    hf_wavlm.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
278
"""Deprecated alias: BeitFeatureExtractor -> BeitImageProcessor."""
import warnings

from ...utils import logging
from .image_processing_beit import BeitImageProcessor


logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    """Backward-compatibility shim; forwards everything to BeitImageProcessor."""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
278
1
from typing import List, Optional, Tuple, Union

import PIL
import torch
from torchvision import transforms

from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor


# Shared preprocessing: resize to 256x256, scale to [-1, 1]
trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def preprocess(image):
    """Convert a PIL image (or list of PIL images) into a stacked (N, C, H, W) tensor.

    Tensors are passed through unchanged.
    """
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image


class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    """Noise an input image to a strength-dependent timestep, then DDIM-denoise it."""

    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        """Validate that ``strength`` lies in [0, 1]."""
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        """Return the tail of the scheduler's timesteps corresponding to ``strength``."""
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        """Move the image to the target device/dtype and add scheduler noise at ``timestep``."""
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # 1. Check inputs
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
368
"""Convert a T5X (JAX) checkpoint of T5/LongT5 into a HuggingFace Flax checkpoint.

Walks the T5X parameter tree layer by layer and copies every kernel /
layer-norm scale into the matching slot of a freshly initialised
``FlaxAutoModelForSeq2SeqLM`` parameter tree, then saves the result.
"""
import argparse

from t5x import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM


def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    """Copy all weights of a T5X checkpoint into a Flax T5/LongT5 model and save it.

    Args:
        t5x_checkpoint_path: Directory holding the T5X checkpoint.
        config_name: HF config name/path used to instantiate the target model.
        flax_dump_folder_path: Output directory for ``save_pretrained``.

    Raises:
        ValueError: If the config is neither ``t5`` nor a ``longt5`` variant with a
            supported ``encoder_attention_type``.
    """
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    t5x_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)

    # v1.1 / LongT5 checkpoints use a gated MLP with two input projections (wi_0, wi_1).
    split_mlp_wi = "wi_0" in t5x_model["target"]["encoder"]["layers_0"]["mlp"]

    # NOTE: originally the "t5" case was a separate `if`, so plain T5 configs fell
    # through to the `else` and raised — folded into one if/elif chain.
    if config.model_type == "t5":
        encoder_attn_name = "SelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "local":
        encoder_attn_name = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        encoder_attn_name = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
            " attribute with a value from ['local', 'transient-global]."
        )

    # Encoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]

        # Global input layer norm (transient-global attention only)
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            t5x_global_layer_norm = t5x_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]

        # Layer Normalization
        t5x_attention_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]

        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning (flax-side key paths restored from the upstream conversion script
        # — the obfuscated source destroyed all assignment targets; verify on review)
        flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
        flax_model_encoder_layer_block["0"][encoder_attn_name]["k"]["kernel"] = t5x_attention_key
        flax_model_encoder_layer_block["0"][encoder_attn_name]["o"]["kernel"] = t5x_attention_out
        flax_model_encoder_layer_block["0"][encoder_attn_name]["q"]["kernel"] = t5x_attention_query
        flax_model_encoder_layer_block["0"][encoder_attn_name]["v"]["kernel"] = t5x_attention_value
        flax_model_encoder_layer_block["0"]["layer_norm"]["weight"] = t5x_attention_layer_norm

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            flax_model_encoder_layer_block["0"][encoder_attn_name]["global_input_layer_norm"]["weight"] = (
                t5x_global_layer_norm
            )

        if split_mlp_wi:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_encoder_layer_block["1"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_encoder_layer_block["1"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["encoder"]["block"][str(layer_index)]["layer"] = flax_model_encoder_layer_block

    # Only for layer 0: the relative position bias is shared by all layers.
    t5x_encoder_rel_embedding = t5x_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["relative_attention_bias"][
        "embedding"
    ] = t5x_encoder_rel_embedding

    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        t5x_encoder_global_rel_embedding = t5x_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name][
            "global_relative_attention_bias"
        ]["embedding"] = t5x_encoder_global_rel_embedding

    # Final encoder norm
    t5x_encoder_norm = t5x_model["target"]["encoder"]["encoder_norm"]["scale"]
    flax_model.params["encoder"]["final_layer_norm"]["weight"] = t5x_encoder_norm

    # Decoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]

        # Layer Normalization
        t5x_pre_attention_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
            "scale"
        ]

        # Encoder-Decoder-Attention
        t5x_enc_dec_attention_module = t5x_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        t5x_enc_dec_attention_key = t5x_enc_dec_attention_module["key"]["kernel"]
        t5x_enc_dec_attention_out = t5x_enc_dec_attention_module["out"]["kernel"]
        t5x_enc_dec_attention_query = t5x_enc_dec_attention_module["query"]["kernel"]
        t5x_enc_dec_attention_value = t5x_enc_dec_attention_module["value"]["kernel"]

        # Layer Normalization
        t5x_cross_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]

        # MLP
        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
        flax_model_decoder_layer_block["0"]["SelfAttention"]["k"]["kernel"] = t5x_attention_key
        flax_model_decoder_layer_block["0"]["SelfAttention"]["o"]["kernel"] = t5x_attention_out
        flax_model_decoder_layer_block["0"]["SelfAttention"]["q"]["kernel"] = t5x_attention_query
        flax_model_decoder_layer_block["0"]["SelfAttention"]["v"]["kernel"] = t5x_attention_value
        flax_model_decoder_layer_block["0"]["layer_norm"]["weight"] = t5x_pre_attention_layer_norm

        flax_model_decoder_layer_block["1"]["EncDecAttention"]["k"]["kernel"] = t5x_enc_dec_attention_key
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["o"]["kernel"] = t5x_enc_dec_attention_out
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["q"]["kernel"] = t5x_enc_dec_attention_query
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["v"]["kernel"] = t5x_enc_dec_attention_value
        flax_model_decoder_layer_block["1"]["layer_norm"]["weight"] = t5x_cross_layer_norm

        if split_mlp_wi:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_decoder_layer_block["2"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_decoder_layer_block["2"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["decoder"]["block"][str(layer_index)]["layer"] = flax_model_decoder_layer_block

    # Decoder Normalization
    t5x_decoder_norm = t5x_model["target"]["decoder"]["decoder_norm"]["scale"]
    flax_model.params["decoder"]["final_layer_norm"]["weight"] = t5x_decoder_norm

    # Only for layer 0: shared relative position bias.
    t5x_decoder_rel_embedding = t5x_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["decoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"][
        "embedding"
    ] = t5x_decoder_rel_embedding

    # Token Embeddings
    t5x_token_embeddings = t5x_model["target"]["token_embedder"]["embedding"]
    flax_model.params["shared"]["embedding"] = t5x_token_embeddings

    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in t5x_model["target"]["decoder"]:
        flax_model.params["lm_head"]["kernel"] = t5x_model["target"]["decoder"]["logits_dense"]["kernel"]

    flax_model.save_pretrained(flax_dump_folder_path)
    print("T5X Model was sucessfully converted!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
    )
    parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
    parser.add_argument(
        "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
34
0
"""simple docstring""" def _snake_case ( lowercase__ ): assert column_title.isupper() _lowerCamelCase : List[str] = 0 _lowerCamelCase : Any = len(lowercase__ ) - 1 _lowerCamelCase : Optional[Any] = 0 while index >= 0: _lowerCamelCase : Union[str, Any] = (ord(column_title[index] ) - 64) * pow(26 , lowercase__ ) answer += value power += 1 index -= 1 return answer if __name__ == "__main__": from doctest import testmod testmod()
96
"""simple docstring""" import unittest import numpy as np import torch from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowerCAmelCase__ ( lowercase, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = DDIMPipeline lowerCamelCase__ = UNCONDITIONAL_IMAGE_GENERATION_PARAMS lowerCamelCase__ = PipelineTesterMixin.required_optional_params - { """num_images_per_prompt""", """latents""", """callback""", """callback_steps""", } lowerCamelCase__ = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS lowerCamelCase__ = False def A_ ( self ): torch.manual_seed(0 ) _lowerCamelCase : List[Any] = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , ) _lowerCamelCase : List[str] = DDIMScheduler() _lowerCamelCase : Optional[int] = {'unet': unet, 'scheduler': scheduler} return components def A_ ( self , lowercase , lowercase=0 ): if str(lowercase ).startswith('mps' ): _lowerCamelCase : Dict = torch.manual_seed(lowercase ) else: _lowerCamelCase : List[str] = torch.Generator(device=lowercase ).manual_seed(lowercase ) _lowerCamelCase : Tuple = { 'batch_size': 1, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs def A_ ( self ): _lowerCamelCase : Any = 'cpu' _lowerCamelCase : Tuple = self.get_dummy_components() _lowerCamelCase : Optional[Any] = self.pipeline_class(**lowercase ) pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) _lowerCamelCase : str = self.get_dummy_inputs(lowercase ) _lowerCamelCase : int = pipe(**lowercase ).images _lowerCamelCase : 
Any = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 32, 32, 3) ) _lowerCamelCase : Tuple = np.array( [1.000E00, 5.717E-01, 4.717E-01, 1.000E00, 0.000E00, 1.000E00, 3.000E-04, 0.000E00, 9.000E-04] ) _lowerCamelCase : str = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(lowercase , 1E-3 ) def A_ ( self ): super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def A_ ( self ): super().test_save_load_local(expected_max_difference=3E-3 ) def A_ ( self ): super().test_save_load_optional_components(expected_max_difference=3E-3 ) def A_ ( self ): super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def A_ ( self ): _lowerCamelCase : Optional[Any] = 'google/ddpm-cifar10-32' _lowerCamelCase : Optional[Any] = UNetaDModel.from_pretrained(lowercase ) _lowerCamelCase : Dict = DDIMScheduler() _lowerCamelCase : Dict = DDIMPipeline(unet=lowercase , scheduler=lowercase ) ddim.to(lowercase ) ddim.set_progress_bar_config(disable=lowercase ) _lowerCamelCase : List[str] = torch.manual_seed(0 ) _lowerCamelCase : str = ddim(generator=lowercase , eta=0.0 , output_type='numpy' ).images _lowerCamelCase : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) _lowerCamelCase : List[Any] = np.array([0.17_23, 0.16_17, 0.16_00, 0.16_26, 0.14_97, 0.15_13, 0.15_05, 0.14_42, 0.14_53] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def A_ ( self ): _lowerCamelCase : Optional[int] = 'google/ddpm-ema-bedroom-256' _lowerCamelCase : str = UNetaDModel.from_pretrained(lowercase ) _lowerCamelCase : str = DDIMScheduler.from_pretrained(lowercase ) _lowerCamelCase : Optional[int] = DDIMPipeline(unet=lowercase , scheduler=lowercase ) ddpm.to(lowercase ) ddpm.set_progress_bar_config(disable=lowercase ) _lowerCamelCase : Tuple = torch.manual_seed(0 ) _lowerCamelCase : int = ddpm(generator=lowercase , 
output_type='numpy' ).images _lowerCamelCase : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) _lowerCamelCase : str = np.array([0.00_60, 0.02_01, 0.03_44, 0.00_24, 0.00_18, 0.00_02, 0.00_22, 0.00_00, 0.00_69] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
96
1
"""Smoke tests for accelerate's cross-process tensor operations."""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce


def create_tensor(state):
    """Return this process' slice of 1..num_processes**2 as a 1-D float tensor."""
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # The main process is one element longer so the other ranks actually get padded.
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs only on two processes.
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs only on two processes.
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)


def _mp_fn(index):
    # Spawn entry point (e.g. TPU); `index` is the process rank, unused here.
    main()


if __name__ == "__main__":
    main()
91
'''simple docstring''' def lowerCamelCase__ ( A : int , A : int ): '''simple docstring''' return int(input_a == input_a == 0 ) def lowerCamelCase__ ( ): '''simple docstring''' print('''Truth Table of NOR Gate:''' ) print('''| Input 1 | Input 2 | Output |''' ) print(f"""| 0 | 0 | {nor_gate(0 , 0 )} |""" ) print(f"""| 0 | 1 | {nor_gate(0 , 1 )} |""" ) print(f"""| 1 | 0 | {nor_gate(1 , 0 )} |""" ) print(f"""| 1 | 1 | {nor_gate(1 , 1 )} |""" ) if __name__ == "__main__": import doctest doctest.testmod() main()
91
1
"""Tests for the XLM-RoBERTa slow and fast tokenizers."""
import pickle
import shutil
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Round-trip a known token <-> id pair."""
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)

    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                # ^ unk: 2 + 1 = 3                  unk: 2 + 1 = 3 ^
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        # Must unpickle fine even after the temporary vocab file is gone.
        pickle.loads(pickled_tokenizer)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 3_5378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
            0, 3293, 83, 10, 4552, 4989, 7986, 678, 10, 5915, 111, 17_9459, 12_4850, 4, 6044, 237, 12, 6, 5, 6, 4,
            6780, 705, 15, 1388, 44, 378, 1_0114, 711, 152, 20, 6, 5, 2_2376, 642, 1221, 1_5190, 3_4153, 450, 5608,
            959, 1119, 5_7702, 136, 186, 47, 1098, 2_9367, 47,
            # 4426,  # What fairseq tokenizes from "<unk>": "_<"
            # 3678,  # What fairseq tokenizes from "<unk>": "unk"
            # 2740,  # What fairseq tokenizes from "<unk>": ">"
            3,  # What we tokenize from "<unk>": "<unk>"
            6,  # Residue from the tokenization: an extra sentencepiece underline
            4, 6044, 237, 6284, 5_0901, 528, 31, 90, 34, 927, 2,
        ]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
        # Padded sequences written as `real_tokens + [pad] * n` (value-identical to
        # the fully spelled-out literals; 1 is <pad>, mask pads with 0).
        expected_encoding = {
            "input_ids": [
                [0, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136,
                 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6,
                 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803,
                 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839,
                 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42,
                 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425,
                 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2],
                [0, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256,
                 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108,
                 4_3701, 23, 756, 13_5355, 7, 5, 2] + [1] * 68,
                [0, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2] + [1] * 91,
            ],
            "attention_mask": [
                [1] * 106,
                [1] * 38 + [0] * 68,
                [1] * 15 + [0] * 91,
            ],
        }  # noqa: E501

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="xlm-roberta-base",
            revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3",
        )
199
import unittest from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowerCamelCase = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece class A ( UpperCamelCase_ , unittest.TestCase ): UpperCamelCase__ : str =XLMProphetNetTokenizer UpperCamelCase__ : Any =False UpperCamelCase__ : Optional[Any] =True def lowerCamelCase ( self : Tuple ) -> Optional[Any]: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing _lowerCamelCase : Union[str, Any] =XLMProphetNetTokenizer(lowercase_ , keep_accents=lowercase_ ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase ( self : int ) -> List[str]: """simple docstring""" _lowerCamelCase : Tuple ='[PAD]' _lowerCamelCase : Dict =0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ) , lowercase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ) , lowercase_ ) def lowerCamelCase ( self : Optional[Any] ) -> int: """simple docstring""" _lowerCamelCase : int =list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '[PAD]' ) self.assertEqual(vocab_keys[1] , '[CLS]' ) self.assertEqual(vocab_keys[-1] , 'j' ) self.assertEqual(len(lowercase_ ) , 1012 ) def lowerCamelCase ( self : Optional[int] ) -> Any: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1012 ) def lowerCamelCase ( self : Dict ) -> Union[str, Any]: """simple docstring""" _lowerCamelCase : List[Any] =XLMProphetNetTokenizer(lowercase_ , keep_accents=lowercase_ ) _lowerCamelCase : Any =tokenizer.tokenize('This is a test' ) self.assertListEqual(lowercase_ , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowercase_ ) , [value + 
tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) _lowerCamelCase : Dict =tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( lowercase_ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) _lowerCamelCase : Union[str, Any] =tokenizer.convert_tokens_to_ids(lowercase_ ) self.assertListEqual( lowercase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4] ] , ) _lowerCamelCase : int =tokenizer.convert_ids_to_tokens(lowercase_ ) self.assertListEqual( lowercase_ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '[UNK]', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '[UNK]', '.', ] , ) @cached_property def lowerCamelCase ( self : Any ) -> Optional[Any]: """simple docstring""" return XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased' ) @slow def lowerCamelCase ( self : List[str] ) -> Any: """simple docstring""" _lowerCamelCase : Optional[int] ='Hello World!' 
_lowerCamelCase : Optional[int] =[3_5389, 6672, 49, 2] self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) ) @slow def lowerCamelCase ( self : Optional[int] ) -> Dict: """simple docstring""" _lowerCamelCase : Dict ={'input_ids': [[1_1073, 8_2783, 18, 26, 8_2783, 549, 5_1540, 248, 1_7209, 1301, 217, 20, 21_5186, 1325, 147, 1_7209, 1301, 217, 20, 5_6370, 53, 12_2020, 20, 1_6477, 27, 8_7355, 4548, 20, 4728, 7_8392, 17, 15_9969, 18, 26, 2_4491, 629, 15, 538, 2_2704, 5439, 15, 2788, 2_4491, 9885, 15, 4_3534, 605, 15, 814, 1_8403, 3_3200, 29, 15, 4_3534, 2_4458, 1_2410, 111, 2_4966, 8_3669, 9637, 14_4068, 26, 850, 2_2346, 27, 147, 2_4966, 8_3669, 8_3490, 26, 3_9113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 12_2020, 11_5785, 34, 816, 1339, 4_6887, 18, 147, 5_3905, 1951, 4_2238, 4_1170, 1_7732, 834, 436, 15, 2_7523, 9_8733, 217, 147, 5542, 4981, 930, 1_7347, 16, 2], [2_0091, 629, 94, 8_2786, 58, 490, 20, 1528, 84, 5_3905, 344, 8_0592, 11_0128, 1_8822, 5267, 1306, 62, 15_2537, 308, 7997, 401, 12_4427, 549, 3_5442, 225, 109, 1_5055, 2_5748, 147, 7119, 4_3712, 34, 767, 13_5366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 6_3784, 11_9466, 17, 14_7808, 8_8214, 18, 656, 81, 32, 3296, 1_0280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowercase_ , model_name='microsoft/xprophetnet-large-wiki100-cased' , revision='1acad1643ddd54a44df6a1b797ada8373685d90e' , )
199
1
"""Nightly GPU integration tests for the VersatileDiffusion text-to-image pipeline.

NOTE(review): recovered from a name-mangled dump in which both test classes were
emitted as `__magic_name__` and all three methods as `lowerCAmelCase`, so later
definitions silently shadowed earlier ones and the `tearDown` unittest hook was
lost.  Distinct conventional names are restored below, and the bare `snake_case`
argument placeholders are mapped back to the obvious local values.
`disable=None` and `torch.float16` are reconstructions — TODO confirm against
the upstream diffusers test file.
"""
import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device

lowercase = False  # module-level flag kept for compatibility; unused in this chunk


class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # Clean up the VRAM after each test.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        """Saving then reloading the pruned pipeline must not change its forward pass."""
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # Re-seed so the reloaded pipeline sees the identical noise.
        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_text2img(self):
        """Full 50-step fp16 generation must reproduce the pinned image slice."""
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
242
"""Flax ControlNet implementation.

NOTE(review): recovered from a name-mangled dump: all three classes were emitted
as `__magic_name__`, which broke the internal references to
`FlaxControlNetConditioningEmbedding` and `FlaxControlNetOutput` below.  Class,
field and method names are restored from those in-body usages; the non-existent
`jnp.floataa` / `jnp.intaa` are restored to the real `jnp.float32` / `jnp.int32`.
The `...aD` block names are kept as-is to stay consistent with the (equally
mangled) sibling module they are imported from.
"""
from typing import Optional, Tuple, Union

import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict

from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
    FlaxCrossAttnDownBlockaD,
    FlaxDownBlockaD,
    FlaxUNetMidBlockaDCrossAttn,
)


@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    """ControlNet outputs: one residual per down-block stage plus the mid-block residual."""

    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray


class FlaxControlNetConditioningEmbedding(nn.Module):
    """Small conv encoder that embeds the conditioning image into latent feature space."""

    conditioning_embedding_channels: int
    block_out_channels: Tuple[int, ...] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self) -> None:
        self.conv_in = nn.Conv(
            self.block_out_channels[0],
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            blocks.append(
                nn.Conv(channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype)
            )
            # Strided conv halves the spatial resolution between stages.
            blocks.append(
                nn.Conv(
                    channel_out,
                    kernel_size=(3, 3),
                    strides=(2, 2),
                    padding=((1, 1), (1, 1)),
                    dtype=self.dtype,
                )
            )
        self.blocks = blocks

        # Zero-initialized so the ControlNet starts as a no-op on top of the UNet.
        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)
        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)
        return self.conv_out(embedding)


@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    """A copy of the UNet encoder that emits residuals conditioned on an extra image."""

    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str, ...] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool, ...]] = False
    block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int, ...]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int, ...] = (16, 32, 96, 256)

    def init_weights(self, rng) -> FrozenDict:
        """Initialize parameters by tracing `__call__` with dummy inputs."""
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        # The conditioning image is at 8x the latent resolution.
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]

    def setup(self) -> None:
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in
        # https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too
        # backwards breaking which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)
        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]

        controlnet_block = nn.Conv(
            output_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlockaD(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlockaD(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )
            down_blocks.append(down_block)

            # One zero conv per resnet layer in the stage.
            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

            # Plus one extra zero conv for the downsampler of non-final stages.
            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlockaDCrossAttn(
            in_channels=mid_block_channel,
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            dtype=self.dtype,
        )
        self.controlnet_mid_block = nn.Conv(
            mid_block_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale: float = 1.0,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxControlNetOutput, Tuple]:
        """Run the ControlNet and return scaled residuals for the UNet."""
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process (NCHW -> NHWC for flax convs)
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlockaD):
                sample, res_samples = down_block(
                    sample, t_emb, encoder_hidden_states, deterministic=not train
                )
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. contronet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(
            down_block_res_samples, self.controlnet_down_blocks
        ):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
242
1
# NOTE(review): recovered from a name-mangled dump in which both classes were
# emitted as `snake_case__`, breaking the `TransformerTemporalModelOutput`
# reference in `forward`, and `lowercase_` served as both a method name and
# every argument value.  Names are restored from their in-body usages.
from dataclasses import dataclass
from typing import Optional

import torch
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin


@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    """Output of TransformerTemporalModel: the residually-updated hidden states."""

    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    """A transformer that attends across the time (frame) axis of a video latent."""

    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        self.norm = torch.nn.GroupNorm(
            num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True
        )
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for d in range(num_layers)
            ]
        )

        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(
        self,
        hidden_states,
        encoder_hidden_states=None,
        timestep=None,
        class_labels=None,
        num_frames=1,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        """Apply temporal self-attention over the frame axis and add it residually.

        Assumes `hidden_states` is (batch * num_frames, channel, height, width) —
        grounded by the reshape below; TODO confirm against callers.
        """
        # 1. Input
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states

        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        # Fold the spatial axes into the batch so attention runs over frames only.
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(
            batch_size * height * width, num_frames, channel
        )

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )

        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, num_frames, channel)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
262
"""Project Euler problem 8: largest product of 13 adjacent digits.

NOTE(review): recovered from a name-mangled dump in which both functions were
emitted as `__lowercase` and the constant as `lowercase__`, while the call
sites use `str_eval`, `solution` and `N` — a NameError at runtime; the proper
names are restored.  The original sliding heuristic was also incorrect: it
never evaluated the running substring at the end of the scan and jumped 13
positions after each reset, so whole windows were skipped and a non-maximal
product could be returned.  Replaced with an exhaustive scan of every
13-digit window.
"""
import sys  # kept: file-level import (unused here but may be relied on elsewhere)

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def str_eval(s: str) -> int:
    """Return the product of the decimal digits in `s` (1 for the empty string)."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Return the largest product of 13 adjacent digits in the digit string `n`.

    `n` must contain at least 13 digits.
    """
    # Exhaustive scan: every 13-digit window is evaluated exactly once.
    return max(str_eval(n[i : i + 13]) for i in range(len(n) - 12))


if __name__ == "__main__":
    print(f"{solution() = }")
264
0
"""Processor class for ViLT.

NOTE(review): recovered from a name-mangled dump in which the class, its
`ProcessorMixin` attributes, and five methods (all collapsed onto one name, so
only the last survived as a class attribute) were obfuscated.  Conventional
names are restored from the method bodies; the warning categories are assumed
to be `FutureWarning` per the deprecation messages — TODO confirm upstream.
"""
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class ViltProcessor(ProcessorMixin):
    """Bundles a ViLT image processor and a BERT tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # `feature_extractor` is the deprecated alias for `image_processor`.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Tokenize `text` and preprocess `images`, merging both into one encoding."""
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Deduplicated union of tokenizer and image-processor input names.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
254
"""simple docstring""" import unittest from transformers import DebertaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST class _lowerCAmelCase ( a ): """simple docstring""" def __init__( self , __UpperCAmelCase , __UpperCAmelCase=1_3 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=9_9 , __UpperCAmelCase=3_2 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=3_7 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_1_2 , __UpperCAmelCase=1_6 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase="None" , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = parent lowerCAmelCase__ :int = batch_size lowerCAmelCase__ :List[str] = seq_length lowerCAmelCase__ :Tuple = is_training lowerCAmelCase__ :Tuple = use_input_mask lowerCAmelCase__ :Dict = use_token_type_ids lowerCAmelCase__ :Union[str, Any] = use_labels lowerCAmelCase__ :Tuple = vocab_size lowerCAmelCase__ :List[Any] = hidden_size lowerCAmelCase__ :Tuple = num_hidden_layers lowerCAmelCase__ :str = num_attention_heads lowerCAmelCase__ :List[str] = intermediate_size lowerCAmelCase__ :Optional[Any] = hidden_act lowerCAmelCase__ :Union[str, Any] = hidden_dropout_prob lowerCAmelCase__ :Any = 
attention_probs_dropout_prob lowerCAmelCase__ :Dict = max_position_embeddings lowerCAmelCase__ :Tuple = type_vocab_size lowerCAmelCase__ :List[str] = type_sequence_label_size lowerCAmelCase__ :Tuple = initializer_range lowerCAmelCase__ :Optional[Any] = num_labels lowerCAmelCase__ :int = num_choices lowerCAmelCase__ :Union[str, Any] = relative_attention lowerCAmelCase__ :int = position_biased_input lowerCAmelCase__ :Optional[int] = pos_att_type lowerCAmelCase__ :Dict = scope def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase__ :int = None if self.use_input_mask: lowerCAmelCase__ :int = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) lowerCAmelCase__ :Optional[Any] = None if self.use_token_type_ids: lowerCAmelCase__ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase__ :Dict = None lowerCAmelCase__ :Union[str, Any] = None lowerCAmelCase__ :Dict = None if self.use_labels: lowerCAmelCase__ :Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase__ :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase__ :Optional[int] = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase__ :Dict = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def snake_case ( self ): '''simple docstring''' return DebertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , 
relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = self.get_config() lowerCAmelCase__ :Optional[Any] = 3_0_0 return config def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' self.parent.assertListEqual(list(result.loss.size() ) , [] ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Dict = DebertaModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ :Any = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase )[0] lowerCAmelCase__ :List[Any] = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase )[0] lowerCAmelCase__ :Dict = model(__UpperCAmelCase )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :str = DebertaForMaskedLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ :List[str] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Tuple = self.num_labels lowerCAmelCase__ :int = DebertaForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ :Optional[int] = 
model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(__UpperCAmelCase ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = self.num_labels lowerCAmelCase__ :Any = DebertaForTokenClassification(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ :List[Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :int = DebertaForQuestionAnswering(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ :str = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = self.prepare_config_and_inputs() ( ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ) :Tuple = config_and_inputs lowerCAmelCase__ :int = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class 
_lowerCAmelCase ( a , a , unittest.TestCase ): """simple docstring""" __magic_name__ :List[str] = ( ( DebertaModel, DebertaForMaskedLM, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaForQuestionAnswering, ) if is_torch_available() else () ) __magic_name__ :Optional[Any] = ( { """feature-extraction""": DebertaModel, """fill-mask""": DebertaForMaskedLM, """question-answering""": DebertaForQuestionAnswering, """text-classification""": DebertaForSequenceClassification, """token-classification""": DebertaForTokenClassification, """zero-shot""": DebertaForSequenceClassification, } if is_torch_available() else {} ) __magic_name__ :Tuple = True __magic_name__ :List[Any] = False __magic_name__ :Optional[Any] = False __magic_name__ :str = False __magic_name__ :int = False def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[str] = DebertaModelTester(self ) lowerCAmelCase__ :List[Any] = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=3_7 ) def snake_case ( self ): '''simple docstring''' self.config_tester.run_common_tests() def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*__UpperCAmelCase ) @slow def snake_case ( self ): '''simple docstring''' for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase__ :int = DebertaModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @require_torch @require_sentencepiece @require_tokenizers class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @unittest.skip(reason='Model not available yet' ) def snake_case ( self ): '''simple docstring''' pass @slow def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :str = DebertaModel.from_pretrained('microsoft/deberta-base' ) lowerCAmelCase__ :str = torch.tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] ) lowerCAmelCase__ :Tuple = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): lowerCAmelCase__ :int = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )[0] # compare the actual values for a slice. lowerCAmelCase__ :str = torch.tensor( [[[-0.59_86, -0.80_55, -0.84_62], [1.44_84, -0.93_48, -0.80_59], [0.31_23, 0.00_32, -1.41_31]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __UpperCAmelCase , atol=1E-4 ) , F"{output[:, 1:4, 1:4]}" )
254
1
"""simple docstring""" _lowercase : List[Any] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n' _lowercase : List[Any] = [{'type': 'code', 'content': INSTALL_CONTENT}] _lowercase : Optional[int] = { '{processor_class}': 'FakeProcessorClass', '{model_class}': 'FakeModelClass', '{object_class}': 'FakeObjectClass', }
332
"""simple docstring""" _lowercase : Any = '\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n' _lowercase : Tuple = [{'type': 'code', 'content': INSTALL_CONTENT}] _lowercase : int = { '{processor_class}': 'FakeProcessorClass', '{model_class}': 'FakeModelClass', '{object_class}': 'FakeObjectClass', }
332
1
'''simple docstring'''
import os
from typing import Dict, List, Tuple, TypeVar, Union

# Restored distinct alias names: the original bound TypeVar('T') and all three
# aliases to the same variable, so `T` was undefined (NameError at import) and
# each assignment overwrote the previous one.

# Generic element type shared by the aliases below.
T = TypeVar('T')

# A homogeneous sequence of T: list or tuple.
ListLike = Union[List[T], Tuple[T, ...]]

# T itself, or T nested one level deep in a list / string-keyed dict.
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]

# Anything accepted as a filesystem path.
PathLike = Union[str, bytes, os.PathLike]
311
'''simple docstring''' import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=sys.maxsize ) -> Union[str, Any]: """simple docstring""" A : Tuple = '''bilinear''' A : Optional[int] = max_size A : Dict = short_edge_length def __call__( self , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : Tuple = [] for img in imgs: A, A : str = img.shape[:2] # later: provide list and randomly choose index for resize A : Union[str, Any] = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 ) if size == 0: return img A : int = size * 1.0 / min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if h < w: A, A : Tuple = size, scale * w else: A, A : str = scale * h, size if max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) > self.max_size: A : List[str] = self.max_size * 1.0 / max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Tuple = newh * scale A : int = neww * scale A : List[str] = int(neww + 0.5 ) A : int = int(newh + 0.5 ) if img.dtype == np.uinta: A : Dict = Image.fromarray(SCREAMING_SNAKE_CASE ) A : Optional[Any] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR ) A : str = np.asarray(SCREAMING_SNAKE_CASE ) else: A : Dict = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw A : List[Any] = nn.functional.interpolate( SCREAMING_SNAKE_CASE , (newh, neww) , mode=self.interp_method , align_corners=SCREAMING_SNAKE_CASE ).squeeze(0 ) img_augs.append(SCREAMING_SNAKE_CASE ) return img_augs class A : def __init__( self , SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" A : Any = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST ) A : str = cfg.INPUT.FORMAT A : int = cfg.SIZE_DIVISIBILITY A : Optional[int] = cfg.PAD_VALUE A : Dict = cfg.INPUT.MAX_SIZE_TEST A : 
Optional[Any] = cfg.MODEL.DEVICE A : Dict = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) A : Tuple = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) A : str = lambda SCREAMING_SNAKE_CASE : (x - self.pixel_mean) / self.pixel_std def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" A : Union[str, Any] = tuple(max(SCREAMING_SNAKE_CASE ) for s in zip(*[img.shape for img in images] ) ) A : List[str] = [im.shape[-2:] for im in images] A : Optional[Any] = [ nn.functional.pad( SCREAMING_SNAKE_CASE , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ] return torch.stack(SCREAMING_SNAKE_CASE ), torch.tensor(SCREAMING_SNAKE_CASE ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]: """simple docstring""" with torch.no_grad(): if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : str = [images] if single_image: assert len(SCREAMING_SNAKE_CASE ) == 1 for i in range(len(SCREAMING_SNAKE_CASE ) ): if isinstance(images[i] , torch.Tensor ): images.insert(SCREAMING_SNAKE_CASE , images.pop(SCREAMING_SNAKE_CASE ).to(self.device ).float() ) elif not isinstance(images[i] , torch.Tensor ): images.insert( SCREAMING_SNAKE_CASE , torch.as_tensor(img_tensorize(images.pop(SCREAMING_SNAKE_CASE ) , input_format=self.input_format ) ) .to(self.device ) .float() , ) # resize smallest edge A : Tuple = torch.tensor([im.shape[:2] for im in images] ) A : Dict = self.aug(SCREAMING_SNAKE_CASE ) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic A : Tuple = [self.normalizer(SCREAMING_SNAKE_CASE ) for x in images] # now pad them to do the following operations A, A : 
Optional[int] = self.pad(SCREAMING_SNAKE_CASE ) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad A : Tuple = torch.true_divide(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' assert torch.isfinite(snake_case__ ).all(), "Box tensor contains infinite or NaN!" A, A : str = box_size tensor[:, 0].clamp_(min=0 , max=snake_case__ ) tensor[:, 1].clamp_(min=0 , max=snake_case__ ) tensor[:, 2].clamp_(min=0 , max=snake_case__ ) tensor[:, 3].clamp_(min=0 , max=snake_case__ )
311
1
import json import os import unittest from transformers import DebertaTokenizer, DebertaTokenizerFast from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class __lowerCAmelCase ( a , unittest.TestCase ): """simple docstring""" _SCREAMING_SNAKE_CASE = DebertaTokenizer _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = DebertaTokenizerFast def lowerCAmelCase__ ( self : Union[str, Any] ) -> Any: """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt snake_case_ = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "[UNK]", ] snake_case_ = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) ) snake_case_ = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] snake_case_ = {"unk_token": "[UNK]"} snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(_lowerCAmelCase ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(_lowerCAmelCase ) ) def lowerCAmelCase__ ( self : str , **_lowerCAmelCase : List[Any] ) -> Optional[Any]: """simple docstring""" kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCAmelCase ) def lowerCAmelCase__ ( self : List[Any] , _lowerCAmelCase : Dict ) -> List[Any]: """simple docstring""" snake_case_ = "lower newer" snake_case_ = "lower newer" return input_text, output_text def lowerCAmelCase__ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" snake_case_ = self.get_tokenizer() snake_case_ = "lower newer" snake_case_ = ["l", "o", "w", 
"er", "\u0120", "n", "e", "w", "er"] snake_case_ = tokenizer.tokenize(_lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase ) snake_case_ = tokens + [tokenizer.unk_token] snake_case_ = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , _lowerCAmelCase ) def lowerCAmelCase__ ( self : str ) -> Optional[Any]: """simple docstring""" snake_case_ = self.get_tokenizer() snake_case_ = tokenizer("Hello" , "World" ) snake_case_ = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] self.assertListEqual(tokd["token_type_ids"] , _lowerCAmelCase ) @slow def lowerCAmelCase__ ( self : str ) -> Tuple: """simple docstring""" snake_case_ = self.tokenizer_class.from_pretrained("microsoft/deberta-base" ) snake_case_ = tokenizer.encode("sequence builders" , add_special_tokens=_lowerCAmelCase ) snake_case_ = tokenizer.encode("multi-sequence build" , add_special_tokens=_lowerCAmelCase ) snake_case_ = tokenizer.encode( "sequence builders" , add_special_tokens=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase ) snake_case_ = tokenizer.encode( "sequence builders" , "multi-sequence build" , add_special_tokens=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase ) snake_case_ = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase ) snake_case_ = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase , _lowerCAmelCase ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode @slow def lowerCAmelCase__ ( self : Tuple ) -> Tuple: """simple docstring""" snake_case_ = [self.tokenizer_class] if self.test_rust_tokenizer: tokenizer_classes.append(self.rust_tokenizer_class ) for tokenizer_class in tokenizer_classes: snake_case_ = tokenizer_class.from_pretrained("microsoft/deberta-base" ) snake_case_ = [ "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations", "ALBERT incorporates two parameter reduction techniques", "The first one is a factorized embedding 
parameterization. By decomposing the large vocabulary" " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of" " vocabulary embedding.", ] snake_case_ = tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase ) snake_case_ = [tokenizer.decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase ) for seq in encoding["input_ids"]] # fmt: off snake_case_ = { "input_ids": [ [1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 3_5, 8_3, 2_5_1_9_1, 1_6_3, 1_8_8_5_4, 1_3, 1_2_1_5_6, 1_2, 1_6_1_0_1, 2_5_3_7_6, 1_3_8_0_7, 9, 2_2_2_0_5, 2_7_8_9_3, 1_6_3_5, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 2_4_5_3_6, 8_0, 4_3_7_9_7, 4_8_7_8, 7_3_7_3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1_3_3, 7_8, 6_5, 1_6, 1_0, 3_7_2_4, 1_5_3_8, 3_3_1_8_3, 1_1_3_0_3, 4_3_7_9_7, 1_9_3_8, 4, 8_7_0, 2_4_1_6_5, 2_9_1_0_5, 5, 7_3_9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 3_6_1_7_3, 8_8, 8_0, 6_5_0, 7_8_2_1, 4_5_9_4_0, 6, 5_2, 2_5_5_9, 5, 1_8_3_6, 9, 5, 7_3_9_7, 1_3_1_7_1, 3_1, 5, 1_8_3_6, 9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 4, 2] ], "token_type_ids": [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], "attention_mask": [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ] } # fmt: on snake_case_ = [ "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations", "ALBERT incorporates two parameter reduction techniques", "The first one is a factorized embedding parameterization. By decomposing the large vocabulary" " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of" " vocabulary embedding.", ] self.assertDictEqual(encoding.data , _lowerCAmelCase ) for expected, decoded in zip(_lowerCAmelCase , _lowerCAmelCase ): self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
159
from __future__ import annotations from collections import namedtuple def _lowerCAmelCase ( lowerCAmelCase_ :float , lowerCAmelCase_ :float , lowerCAmelCase_ :float )->tuple: '''simple docstring''' snake_case_ = namedtuple("result" , "name value" ) if (voltage, current, power).count(0 ) != 1: raise ValueError("Only one argument must be 0" ) elif power < 0: raise ValueError( "Power cannot be negative in any electrical/electronics system" ) elif voltage == 0: return result("voltage" , power / current ) elif current == 0: return result("current" , power / voltage ) elif power == 0: return result("power" , float(round(abs(voltage * current ) , 2 ) ) ) else: raise ValueError("Exactly one argument must be 0" ) if __name__ == "__main__": import doctest doctest.testmod()
159
1
def snake_case(__lowercase) -> tuple[int, int]:
    """Convert a number (or numeric string) to a reduced fraction.

    Returns:
        ``(numerator, denominator)`` in lowest terms.

    Raises:
        ValueError: if the input cannot be parsed as a number.
    """
    # Restored working local names: the original bound every intermediate to
    # the same variable and then read `decimal`, `dividend`, `divisor` and
    # `remainder`, all of which were undefined (NameError).
    try:
        decimal = float(__lowercase)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    # Scale by a power of ten large enough to make the value an integer.
    number_of_frac_digits = len(str(decimal).split(".")[1])
    numerator = int(decimal * (10**number_of_frac_digits))
    denominator = 10**number_of_frac_digits
    # Euclid's algorithm for the greatest common divisor.
    divisor, dividend = denominator, numerator
    while True:
        remainder = dividend % divisor
        if remainder == 0:
            break
        dividend, divisor = divisor, remainder
    return int(numerator / divisor), int(denominator / divisor)


# Backward-compatible public alias used by the demo calls below.
decimal_to_fraction = snake_case

if __name__ == "__main__":
    print(f"{decimal_to_fraction(2) = }")
    print(f"{decimal_to_fraction(89.0) = }")
    print(f"{decimal_to_fraction('67') = }")
    print(f"{decimal_to_fraction('45.0') = }")
    print(f"{decimal_to_fraction(1.5) = }")
    print(f"{decimal_to_fraction('6.25') = }")
    print(f"{decimal_to_fraction('78td') = }")
284
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Lazy-import table: submodule name -> public names it provides.
# Restored `_import_structure`: the original bound the table, the torch-only
# list, and the _LazyModule instance to unrelated throwaway variables, so the
# table was undefined and the lazy module was never installed.
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Modeling classes require torch; expose only the config when it is absent.
    pass
else:
    _import_structure["modeling_mra"] = [
        "MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MraForMaskedLM",
        "MraForMultipleChoice",
        "MraForQuestionAnswering",
        "MraForSequenceClassification",
        "MraForTokenClassification",
        "MraLayer",
        "MraModel",
        "MraPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers.
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
1
"""simple docstring""" from math import factorial def lowercase ( A_ , A_ )-> int: '''simple docstring''' if n < k or k < 0: raise ValueError("Please enter positive integers for n and k where n >= k" ) return factorial(A_ ) // (factorial(A_ ) * factorial(n - k )) if __name__ == "__main__": print( """The number of five-card hands possible from a standard""", f'''fifty-two card deck is: {combinations(52, 5)}\n''', ) print( """If a class of 40 students must be arranged into groups of""", f'''4 for group projects, there are {combinations(40, 4)} ways''', """to arrange them.\n""", ) print( """If 10 teams are competing in a Formula One race, there""", f'''are {combinations(10, 3)} ways that first, second and''', """third place can be awarded.""", )
40
"""simple docstring""" class _A : """simple docstring""" def __init__( self : int , __UpperCAmelCase : int): a : Tuple = size a : Dict = [0] * size a : Optional[int] = [0] * size @staticmethod def __snake_case ( __UpperCAmelCase : int): return index | (index + 1) @staticmethod def __snake_case ( __UpperCAmelCase : int): return (index & (index + 1)) - 1 def __snake_case ( self : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : int): a : Union[str, Any] = value while index < self.size: a : Dict = self.get_prev(__UpperCAmelCase) + 1 if current_left_border == index: a : Optional[int] = value else: a : Any = max(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) a : Optional[int] = self.get_next(__UpperCAmelCase) def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : int): right -= 1 # Because of right is exclusive a : List[str] = 0 while left <= right: a : Dict = self.get_prev(__UpperCAmelCase) if left <= current_left: a : Optional[int] = max(__UpperCAmelCase , self.tree[right]) a : Optional[Any] = current_left else: a : List[str] = max(__UpperCAmelCase , self.arr[right]) right -= 1 return result if __name__ == "__main__": import doctest doctest.testmod()
40
1
# Exported explicitly so `from module import *` picks up the underscored name.
__all__ = ["__UpperCamelCase"]


def __UpperCamelCase(arr, required_sum):
    """Return True if some subset of ``arr`` sums to ``required_sum``.

    Classic O(len(arr) * required_sum) dynamic program over non-negative
    integers: ``subset[i][j]`` is True when some subset of the first ``i``
    elements sums to ``j``.
    """
    # Restored distinct names: the original declared both parameters as `_A`
    # (SyntaxError) and never wrote its intermediate results into the table.
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # A sum of zero is always reachable via the empty subset.
    for i in range(arr_len + 1):
        subset[i][0] = True
    # With no elements available, no positive sum is reachable.
    for j in range(1, required_sum + 1):
        subset[0][j] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                # Element too large for this target: inherit the answer.
                subset[i][j] = subset[i - 1][j]
            else:
                # Either skip the element or take it.
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
167
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

# Restored distinct module-level names: the original bound both the logger and
# this archive map to the same variable, clobbering the first.
DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'shi-labs/dinat-mini-in1k-224': 'https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json',
    # See all Dinat models at https://huggingface.co/models?filter=dinat
}


class A(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for a DiNAT (Dilated Neighborhood Attention
    Transformer) model / backbone.

    NOTE(review): the obfuscated original declared every ``__init__``
    parameter with the same name (SyntaxError) and dropped every ``self.``
    prefix, so no attribute was ever stored; parameter names restored from
    the attribute assignments the body performs.
    """

    model_type = 'dinat'

    # Map the generic transformer attribute names onto DiNAT's own fields.
    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],  # mutable defaults kept for upstream signature compatibility
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with
        # VisionEncoderDecoderModel; this indicates the channel dimension
        # after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ['stem'] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
167
1
import tempfile
import unittest

from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch

# Restored two distinct model ids: the original assigned both to one name,
# so the first checkpoint id was lost.
TINY_BART = 'sshleifer/bart-tiny-random'
TINY_T5 = 'patrickvonplaten/t5-tiny-random'


@require_torch
class UpperCAmelCase_(unittest.TestCase):
    """Tests for create_student_by_copying_alternating_layers.

    NOTE(review): the obfuscated original named every method identically, so
    only the last one survived; method names and the BART/T5 split below are
    reconstructed from the assertions each body makes — confirm against the
    upstream test file.
    """

    @cached_property
    def teacher_config(self):
        # Cached so the hub config is fetched at most once per test run.
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        # d=None: decoder depth copied from the teacher; must not raise.
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        # Asking for neither encoder nor decoder layers is invalid.
        # NOTE(review): exception type was scrubbed in the original — presumed
        # AssertionError; verify against make_student's validation.
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
36
import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def lowerCAmelCase__(*args: str, take_from: Optional[Union[Dict, Any]] = None, standard_warn: bool = True, stacklevel: int = 2) -> Union[str, Any]:
    """Emit a deprecation warning for one or more ``(attribute, version, message)``
    tuples, optionally popping deprecated kwargs out of ``take_from``.

    Restored distinct keyword names: the original declared every parameter as
    ``a__`` (SyntaxError) and collapsed ``__version__`` / the warning category
    into the same placeholder.

    Returns:
        Nothing, the single collected value, or the tuple of collected values.

    Raises:
        ValueError: if a deprecation is past its removal version.
        TypeError: if ``take_from`` still contains unexpected kwargs afterwards.
    """
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        # A deprecation that outlived its removal version is a programming error.
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + ' ' if standard_warn else ''
            # NOTE(review): category was scrubbed in the original — FutureWarning
            # per upstream diffusers; confirm.
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    # Anything left in the dict was not a recognized deprecated kwarg.
    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
329
0
"""simple docstring""" from decimal import Decimal, getcontext from math import ceil, factorial def a__ ( snake_case__ ) -> str: if not isinstance(snake_case__ , snake_case__ ): raise TypeError("""Undefined for non-integers""" ) elif precision < 1: raise ValueError("""Undefined for non-natural numbers""" ) lowerCamelCase = precision lowerCamelCase = ceil(precision / 14 ) lowerCamelCase = 42_68_80 * Decimal(1_00_05 ).sqrt() lowerCamelCase = 1 lowerCamelCase = 13_59_14_09 lowerCamelCase = Decimal(snake_case__ ) for k in range(1 , snake_case__ ): lowerCamelCase = factorial(6 * k ) // (factorial(3 * k ) * factorial(snake_case__ ) ** 3) linear_term += 5_45_14_01_34 exponential_term *= -26_25_37_41_26_40_76_80_00 partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term return str(constant_term / partial_sum )[:-1] if __name__ == "__main__": lowerCAmelCase : Union[str, Any] = 50 print(F"""The first {n} digits of pi is: {pi(n)}""")
168
"""simple docstring""" from __future__ import annotations import unittest import numpy as np from transformers import OPTConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel def a__ ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None ) -> Tuple: if attention_mask is None: lowerCamelCase = tf.cast(tf.math.not_equal(snake_case__ , config.pad_token_id ) , tf.inta ) return {"input_ids": input_ids, "attention_mask": attention_mask} @require_tf class __magic_name__ : '''simple docstring''' __UpperCamelCase = OPTConfig __UpperCamelCase = {} __UpperCamelCase = "gelu" def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=False , _a=99 , _a=16 , _a=2 , _a=4 , _a=4 , _a="gelu" , _a=0.1 , _a=0.1 , _a=20 , _a=2 , _a=1 , _a=0 , _a=16 , _a=16 , ): """simple docstring""" lowerCamelCase = parent lowerCamelCase = batch_size lowerCamelCase = seq_length lowerCamelCase = is_training lowerCamelCase = use_labels lowerCamelCase = vocab_size lowerCamelCase = hidden_size lowerCamelCase = num_hidden_layers lowerCamelCase = num_attention_heads lowerCamelCase = intermediate_size lowerCamelCase = hidden_act lowerCamelCase = hidden_dropout_prob lowerCamelCase = attention_probs_dropout_prob lowerCamelCase = max_position_embeddings lowerCamelCase = eos_token_id lowerCamelCase = pad_token_id lowerCamelCase = bos_token_id lowerCamelCase = embed_dim lowerCamelCase = word_embed_proj_dim lowerCamelCase = False def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) lowerCamelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) 
lowerCamelCase = tf.concat([input_ids, eos_tensor] , axis=1 ) lowerCamelCase = self.config_cls( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=_a , **self.config_updates , ) lowerCamelCase = prepare_opt_inputs_dict(_a , _a ) return config, inputs_dict def _lowerCAmelCase ( self , _a , _a ): """simple docstring""" lowerCamelCase = TFOPTModel(config=_a ) lowerCamelCase = inputs_dict["""input_ids"""] lowerCamelCase = input_ids[:1, :] lowerCamelCase = inputs_dict["""attention_mask"""][:1, :] lowerCamelCase = 1 # first forward pass lowerCamelCase = model(_a , attention_mask=_a , use_cache=_a ) lowerCamelCase , lowerCamelCase = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids lowerCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowerCamelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and lowerCamelCase = tf.concat([input_ids, next_tokens] , axis=-1 ) lowerCamelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) lowerCamelCase = model(_a , attention_mask=_a )[0] lowerCamelCase = model(_a , attention_mask=_a , past_key_values=_a )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice lowerCamelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) lowerCamelCase = output_from_no_past[:, -3:, random_slice_idx] lowerCamelCase = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(_a , _a , rtol=1e-3 ) 
@require_tf class __magic_name__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else () __UpperCamelCase = (TFOPTForCausalLM,) if is_tf_available() else () __UpperCamelCase = ( {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = 10 def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = TFOPTModelTester(self ) lowerCamelCase = ConfigTester(self , config_class=_a ) def _lowerCAmelCase ( self ): """simple docstring""" self.config_tester.run_common_tests() def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_a ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() def _get_word_embedding_weight(_a , _a ): if hasattr(_a , """weight""" ): return embedding_layer.weight else: # Here we build the word embeddings weights if not exists. # And then we retry to get the attribute once built. model.build() if hasattr(_a , """weight""" ): return embedding_layer.weight else: return None for model_class in self.all_model_classes: for size in [config.vocab_size - 10, config.vocab_size + 10]: # build the embeddings lowerCamelCase = model_class(config=_a ) lowerCamelCase = _get_word_embedding_weight(_a , model.get_input_embeddings() ) lowerCamelCase = _get_word_embedding_weight(_a , model.get_output_embeddings() ) # reshape the embeddings model.resize_token_embeddings(_a ) lowerCamelCase = _get_word_embedding_weight(_a , model.get_input_embeddings() ) lowerCamelCase = _get_word_embedding_weight(_a , model.get_output_embeddings() ) # check that the resized embeddings size matches the desired size. 
lowerCamelCase = size if size is not None else config.vocab_size self.assertEqual(new_input_embeddings.shape[0] , _a ) # check that weights remain the same after resizing lowerCamelCase = True for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ): if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0: lowerCamelCase = False self.assertTrue(_a ) if old_output_embeddings is not None and new_output_embeddings is not None: self.assertEqual(new_output_embeddings.shape[0] , _a ) lowerCamelCase = True for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ): if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0: lowerCamelCase = False self.assertTrue(_a ) def a__ ( snake_case__ ) -> List[Any]: return tf.constant(snake_case__ , dtype=tf.intaa ) @require_tf class __magic_name__ ( unittest.TestCase ): '''simple docstring''' __UpperCamelCase = 99 def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = tf.ones((4, 1) , dtype=tf.intaa ) * 2 lowerCamelCase = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 ) lowerCamelCase = input_ids.shape[0] lowerCamelCase = OPTConfig( vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size @require_sentencepiece @require_tf class __magic_name__ ( unittest.TestCase ): '''simple docstring''' @slow def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = TFOPTModel.from_pretrained("""facebook/opt-350m""" ) lowerCamelCase = _long_tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] ) lowerCamelCase = tf.not_equal(_a , model.config.pad_token_id ) with tf.GradientTape(): lowerCamelCase = model(input_ids=_a , attention_mask=_a ).last_hidden_state lowerCamelCase = (1, 11, 512) self.assertEqual(output.shape , _a ) lowerCamelCase = tf.constant( [[-0.2_873, 
-1.9_218, -0.3_033], [-1.2_710, -0.1_338, -0.1_902], [0.4_095, 0.1_214, -1.3_121]] ) self.assertTrue(np.allclose(output[:, :3, :3] , _a , atol=4e-3 ) ) lowerCamelCase = tf.function(_a , jit_compile=_a ) lowerCamelCase = xla_generate(_a , _a )[0] self.assertTrue(np.allclose(output[:, :3, :3] , _a , atol=4e-2 ) ) @require_tf @slow class __magic_name__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ): """simple docstring""" super().setUp() lowerCamelCase = """facebook/opt-350m""" def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = TFOPTForCausalLM.from_pretrained(self.path_model ) lowerCamelCase = GPTaTokenizer.from_pretrained(self.path_model ) lowerCamelCase = [ """Today is a beautiful day and I want to""", """In the city of""", """Paris is the capital of France and""", """Computers and mobile phones have taken""", ] # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False lowerCamelCase = tokenizer(_a , return_tensors="""tf""" , padding=_a , add_special_tokens=_a ) lowerCamelCase = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 ) lowerCamelCase = tf.constant( [ [1.3_851, -13.8_923, -10.5_229, -10.7_533, -0.2_309, -10.2_384, -0.5_365, -9.0_947, -5.1_670], [-4.7_073, -10.6_276, -3.9_415, -21.5_242, -0.2_822, -0.2_822, -0.2_822, -0.2_822, -0.2_822], [0.6_247, -3.4_229, -8.9_179, -1.4_297, -14.1_650, 1.4_146, -9.0_218, -0.2_703, -0.2_703], [6.4_783, -1.9_913, -10.7_926, -2.3_336, 1.5_092, -0.9_974, -6.8_213, 1.3_477, 1.3_477], ] ) self.assertTrue(np.allclose(_a , _a , atol=1e-4 ) ) lowerCamelCase = tf.function(_a , jit_compile=_a ) lowerCamelCase = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 ) self.assertTrue(np.allclose(_a , _a , atol=1e-4 ) ) @require_tf @slow class __magic_name__ ( unittest.TestCase ): '''simple docstring''' @property def _lowerCAmelCase ( self ): """simple 
docstring""" return [ "Today is a beautiful day and I want", "In the city of", "Paris is the capital of France and", "Computers and mobile phones have taken", ] def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = """facebook/opt-125m""" lowerCamelCase = [ """Today is a beautiful day and I want to""", """In the city of New York, the city""", """Paris is the capital of France and the capital""", """Computers and mobile phones have taken over the""", ] lowerCamelCase = [] lowerCamelCase = GPTaTokenizer.from_pretrained(_a ) lowerCamelCase = TFOPTForCausalLM.from_pretrained(_a ) for prompt in self.prompts: lowerCamelCase = tokenizer(_a , return_tensors="""tf""" ).input_ids lowerCamelCase = model.generate(_a , max_length=10 ) lowerCamelCase = tokenizer.batch_decode(_a , skip_special_tokens=_a ) predicted_outputs += generated_string self.assertListEqual(_a , _a ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = """facebook/opt-350m""" lowerCamelCase = GPTaTokenizer.from_pretrained(_a ) lowerCamelCase = TFOPTForCausalLM.from_pretrained(_a ) lowerCamelCase = """left""" # use different length sentences to test batching lowerCamelCase = [ """Hello, my dog is a little""", """Today, I""", ] lowerCamelCase = tokenizer(_a , return_tensors="""tf""" , padding=_a ) lowerCamelCase = inputs["""input_ids"""] lowerCamelCase = model.generate(input_ids=_a , attention_mask=inputs["""attention_mask"""] ) lowerCamelCase = tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids lowerCamelCase = model.generate(input_ids=_a ) lowerCamelCase = inputs_non_padded.shape[-1] - tf.math.reduce_sum( tf.cast(inputs["""attention_mask"""][-1] , tf.intaa ) ) lowerCamelCase = tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids lowerCamelCase = model.generate(input_ids=_a , max_length=model.config.max_length - num_paddings ) lowerCamelCase = tokenizer.batch_decode(_a , skip_special_tokens=_a ) lowerCamelCase = tokenizer.decode(output_non_padded[0] , 
skip_special_tokens=_a ) lowerCamelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=_a ) lowerCamelCase = [ """Hello, my dog is a little bit of a dork.\nI'm a little bit""", """Today, I was in the middle of a conversation with a friend about the""", ] self.assertListEqual(_a , _a ) self.assertListEqual(_a , [non_padded_sentence, padded_sentence] ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = """facebook/opt-350m""" lowerCamelCase = [ """Today is a beautiful day and I want to""", """In the city of San Francisco, the city""", """Paris is the capital of France and the capital""", """Computers and mobile phones have taken over the""", ] lowerCamelCase = [] lowerCamelCase = GPTaTokenizer.from_pretrained(_a ) lowerCamelCase = TFOPTForCausalLM.from_pretrained(_a ) for prompt in self.prompts: lowerCamelCase = tokenizer(_a , return_tensors="""tf""" ).input_ids lowerCamelCase = model.generate(_a , max_length=10 ) lowerCamelCase = tokenizer.batch_decode(_a , skip_special_tokens=_a ) predicted_outputs += generated_string self.assertListEqual(_a , _a )
168
1
"""Pancake sort: sort a list using only prefix reversals ("flips")."""


def pancake_sort(arr: list) -> list:
    """Sort ``arr`` with pancake sort and return the sorted list.

    On each pass, flip the prefix that brings the current maximum to the
    front, then flip the whole unsorted prefix so the maximum lands at
    its final position.

    Note: the list is rebuilt via slicing, so the result is returned
    rather than guaranteed to alias the input.

    >>> pancake_sort([3, 1, 2])
    [1, 2, 3]
    """
    # Fix: the function was defined under a mangled name while the
    # __main__ block called ``pancake_sort`` -> NameError at runtime.
    cur = len(arr)
    while cur > 1:
        # Index of the maximum element inside the unsorted prefix arr[:cur].
        mi = arr.index(max(arr[0:cur]))
        # Flip arr[: mi + 1] so the maximum moves to the front ...
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # ... then flip the unsorted prefix so it moves to index cur - 1.
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
44
"""Odd-even (brick) sort: a bubble-sort variant alternating pair parity."""


def odd_even_sort(input_list: list) -> list:
    """Sort ``input_list`` in place with odd-even sort and return it.

    Alternates passes over even-indexed and odd-indexed adjacent pairs,
    swapping out-of-order neighbours, until a full double pass makes no
    swap.

    >>> odd_even_sort([4, 3, 2, 1])
    [1, 2, 3, 4]
    """
    # Fix: the function was defined under a mangled name while the
    # __main__ block called ``odd_even_sort``, and the loop flag was
    # assigned to a mangled name while ``is_sorted`` was read -> NameError.
    is_sorted = False
    while not is_sorted:  # keep looping until a pass makes no swap
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # even-indexed pairs
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # odd-indexed pairs
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list


if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]  # inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
57
0
from __future__ import annotations

from collections.abc import Iterator
from typing import Generic, TypeVar

# Fix: the TypeVar and both class names were mangled (`Generic[T]`,
# `Node(...)` and `isinstance(..., a__)` referenced undefined names, and
# both classes shared one name), making the module unimportable.
T = TypeVar("T")


class Node(Generic[T]):
    """A singly linked list node holding one stack item."""

    def __init__(self, data: T) -> None:
        self.data = data
        # Next node toward the bottom of the stack, or None.
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class Stack(Generic[T]):
    """LIFO stack backed by a singly linked list of ``Node`` objects."""

    def __init__(self) -> None:
        # ``top`` is None exactly when the stack is empty.
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        """Yield items from top to bottom."""
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        """Return True when the stack holds no items."""
        return self.top is None

    def push(self, item: T) -> None:
        """Place ``item`` on top of the stack."""
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        """Remove and return the top item.

        Raises:
            IndexError: if the stack is empty.
        """
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        """Return the top item without removing it.

        Raises:
            IndexError: if the stack is empty.
        """
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        """Discard all items."""
        self.top = None


if __name__ == "__main__":
    from doctest import testmod

    testmod()
354
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Fix: the mapping was assigned to a mangled throwaway name while
# ``_LazyModule(...)`` read ``_import_structure`` -> NameError on import,
# and the lazy module was bound to a variable instead of replacing the
# module in ``sys.modules`` (the standard Transformers lazy-init pattern).
# Lazy import structure: submodule name -> names exported from it.
_import_structure = {
    "configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
    "tokenization_luke": ["LukeTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is installed: expose the modeling classes as well.
    _import_structure["modeling_luke"] = [
        "LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LukeForEntityClassification",
        "LukeForEntityPairClassification",
        "LukeForEntitySpanClassification",
        "LukeForMultipleChoice",
        "LukeForQuestionAnswering",
        "LukeForSequenceClassification",
        "LukeForTokenClassification",
        "LukeForMaskedLM",
        "LukeModel",
        "LukePreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
    from .tokenization_luke import LukeTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_luke import (
            LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
            LukeForEntityClassification,
            LukeForEntityPairClassification,
            LukeForEntitySpanClassification,
            LukeForMaskedLM,
            LukeForMultipleChoice,
            LukeForQuestionAnswering,
            LukeForSequenceClassification,
            LukeForTokenClassification,
            LukeModel,
            LukePreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy loader.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
93
0
'''simple docstring''' from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_mobilenet_va import MobileNetVaConfig a : List[str] = logging.get_logger(__name__) # General docstring a : str = "MobileNetV1Config" # Base docstring a : Any = "google/mobilenet_v1_1.0_224" a : Dict = [1, 10_24, 7, 7] # Image classification docstring a : Dict = "google/mobilenet_v1_1.0_224" a : Tuple = "tabby, tabby cat" a : List[str] = [ "google/mobilenet_v1_1.0_224", "google/mobilenet_v1_0.75_192", # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 ] def lowercase ( __magic_name__ , __magic_name__ , __magic_name__=None ): '''simple docstring''' UpperCAmelCase : Dict = {} if isinstance(__magic_name__ , __magic_name__ ): UpperCAmelCase : str = model.mobilenet_va else: UpperCAmelCase : List[Any] = model UpperCAmelCase : Any = "MobilenetV1/Conv2d_0/" UpperCAmelCase : Dict = backbone.conv_stem.convolution.weight UpperCAmelCase : Optional[int] = backbone.conv_stem.normalization.bias UpperCAmelCase : List[Any] = backbone.conv_stem.normalization.weight UpperCAmelCase : Any = backbone.conv_stem.normalization.running_mean UpperCAmelCase : Union[str, Any] = backbone.conv_stem.normalization.running_var for i in range(13 ): UpperCAmelCase : List[str] = i + 1 UpperCAmelCase : int = i * 2 UpperCAmelCase : Union[str, Any] = backbone.layer[pt_index] UpperCAmelCase : Optional[Any] = F"MobilenetV1/Conv2d_{tf_index}_depthwise/" UpperCAmelCase : int = pointer.convolution.weight UpperCAmelCase : List[Any] = pointer.normalization.bias UpperCAmelCase : Tuple = 
pointer.normalization.weight UpperCAmelCase : Optional[int] = pointer.normalization.running_mean UpperCAmelCase : int = pointer.normalization.running_var UpperCAmelCase : Optional[Any] = backbone.layer[pt_index + 1] UpperCAmelCase : Dict = F"MobilenetV1/Conv2d_{tf_index}_pointwise/" UpperCAmelCase : Dict = pointer.convolution.weight UpperCAmelCase : str = pointer.normalization.bias UpperCAmelCase : Optional[Any] = pointer.normalization.weight UpperCAmelCase : Optional[Any] = pointer.normalization.running_mean UpperCAmelCase : Tuple = pointer.normalization.running_var if isinstance(__magic_name__ , __magic_name__ ): UpperCAmelCase : Dict = "MobilenetV1/Logits/Conv2d_1c_1x1/" UpperCAmelCase : Any = model.classifier.weight UpperCAmelCase : Union[str, Any] = model.classifier.bias return tf_to_pt_map def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' try: import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." 
) raise # Load weights from TF model UpperCAmelCase : Tuple = tf.train.list_variables(__magic_name__ ) UpperCAmelCase : List[Any] = {} for name, shape in init_vars: logger.info(F"Loading TF weight {name} with shape {shape}" ) UpperCAmelCase : List[Any] = tf.train.load_variable(__magic_name__ , __magic_name__ ) UpperCAmelCase : int = array # Build TF to PyTorch weights loading map UpperCAmelCase : Union[str, Any] = _build_tf_to_pytorch_map(__magic_name__ , __magic_name__ , __magic_name__ ) for name, pointer in tf_to_pt_map.items(): logger.info(F"Importing {name}" ) if name not in tf_weights: logger.info(F"{name} not in tf pre-trained weights, skipping" ) continue UpperCAmelCase : List[Any] = tf_weights[name] if "depthwise_weights" in name: logger.info("Transposing depthwise" ) UpperCAmelCase : List[str] = np.transpose(__magic_name__ , (2, 3, 0, 1) ) elif "weights" in name: logger.info("Transposing" ) if len(pointer.shape ) == 2: # copying into linear layer UpperCAmelCase : List[Any] = array.squeeze().transpose() else: UpperCAmelCase : List[Any] = np.transpose(__magic_name__ , (3, 2, 0, 1) ) if pointer.shape != array.shape: raise ValueError(F"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" ) logger.info(F"Initialize PyTorch weight {name} {array.shape}" ) UpperCAmelCase : List[str] = torch.from_numpy(__magic_name__ ) tf_weights.pop(__magic_name__ , __magic_name__ ) tf_weights.pop(name + "/RMSProp" , __magic_name__ ) tf_weights.pop(name + "/RMSProp_1" , __magic_name__ ) tf_weights.pop(name + "/ExponentialMovingAverage" , __magic_name__ ) logger.info(F"Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}" ) return model def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase : Any = features.shape[-2:] UpperCAmelCase , UpperCAmelCase : Union[str, Any] = conv_layer.stride UpperCAmelCase , UpperCAmelCase : Dict = conv_layer.kernel_size if in_height % stride_height == 0: 
UpperCAmelCase : List[Any] = max(kernel_height - stride_height , 0 ) else: UpperCAmelCase : Optional[int] = max(kernel_height - (in_height % stride_height) , 0 ) if in_width % stride_width == 0: UpperCAmelCase : str = max(kernel_width - stride_width , 0 ) else: UpperCAmelCase : Dict = max(kernel_width - (in_width % stride_width) , 0 ) UpperCAmelCase : List[Any] = pad_along_width // 2 UpperCAmelCase : Tuple = pad_along_width - pad_left UpperCAmelCase : Dict = pad_along_height // 2 UpperCAmelCase : Any = pad_along_height - pad_top UpperCAmelCase : List[str] = (pad_left, pad_right, pad_top, pad_bottom) return nn.functional.pad(__magic_name__ , __magic_name__ , "constant" , 0.0 ) class UpperCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self , snake_case , snake_case , snake_case , snake_case , snake_case = 1 , snake_case = 1 , snake_case = False , snake_case = True , snake_case = True , ): '''simple docstring''' super().__init__() UpperCAmelCase : Union[str, Any] = config if in_channels % groups != 0: raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups." ) if out_channels % groups != 0: raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups." 
) UpperCAmelCase : List[str] = 0 if config.tf_padding else int((kernel_size - 1) / 2 ) UpperCAmelCase : Any = nn.Convad( in_channels=snake_case , out_channels=snake_case , kernel_size=snake_case , stride=snake_case , padding=snake_case , groups=snake_case , bias=snake_case , padding_mode="zeros" , ) if use_normalization: UpperCAmelCase : Tuple = nn.BatchNormad( num_features=snake_case , eps=config.layer_norm_eps , momentum=0.9997 , affine=snake_case , track_running_stats=snake_case , ) else: UpperCAmelCase : Tuple = None if use_activation: if isinstance(snake_case , snake_case ): UpperCAmelCase : List[Any] = ACTaFN[use_activation] elif isinstance(config.hidden_act , snake_case ): UpperCAmelCase : Optional[int] = ACTaFN[config.hidden_act] else: UpperCAmelCase : Optional[int] = config.hidden_act else: UpperCAmelCase : str = None def A_ ( self , snake_case ): '''simple docstring''' if self.config.tf_padding: UpperCAmelCase : Union[str, Any] = apply_tf_padding(snake_case , self.convolution ) UpperCAmelCase : Tuple = self.convolution(snake_case ) if self.normalization is not None: UpperCAmelCase : List[str] = self.normalization(snake_case ) if self.activation is not None: UpperCAmelCase : str = self.activation(snake_case ) return features class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = MobileNetVaConfig SCREAMING_SNAKE_CASE__ : Optional[Any] = load_tf_weights_in_mobilenet_va SCREAMING_SNAKE_CASE__ : List[Any] = "mobilenet_v1" SCREAMING_SNAKE_CASE__ : int = "pixel_values" SCREAMING_SNAKE_CASE__ : Union[str, Any] = False def A_ ( self , snake_case ): '''simple docstring''' if isinstance(snake_case , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(snake_case , nn.BatchNormad ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) a : str = R"\n This model is a PyTorch 
[torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n" a : Union[str, Any] = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings( "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." 
, lowercase__ , ) class UpperCamelCase__ ( lowercase__ ): """simple docstring""" def __init__( self , snake_case , snake_case = True ): '''simple docstring''' super().__init__(snake_case ) UpperCAmelCase : Tuple = config UpperCAmelCase : int = 3_2 UpperCAmelCase : Dict = max(int(depth * config.depth_multiplier ) , config.min_depth ) UpperCAmelCase : List[str] = MobileNetVaConvLayer( snake_case , in_channels=config.num_channels , out_channels=snake_case , kernel_size=3 , stride=2 , ) UpperCAmelCase : Any = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1] UpperCAmelCase : Any = nn.ModuleList() for i in range(1_3 ): UpperCAmelCase : List[Any] = out_channels if strides[i] == 2 or i == 0: depth *= 2 UpperCAmelCase : Any = max(int(depth * config.depth_multiplier ) , config.min_depth ) self.layer.append( MobileNetVaConvLayer( snake_case , in_channels=snake_case , out_channels=snake_case , kernel_size=3 , stride=strides[i] , groups=snake_case , ) ) self.layer.append( MobileNetVaConvLayer( snake_case , in_channels=snake_case , out_channels=snake_case , kernel_size=1 , ) ) UpperCAmelCase : Optional[int] = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def A_ ( self , snake_case ): '''simple docstring''' raise NotImplementedError @add_start_docstrings_to_model_forward(snake_case ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=snake_case , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def A_ ( self , snake_case = None , snake_case = None , snake_case = None , ): '''simple docstring''' UpperCAmelCase : Any = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) UpperCAmelCase : Dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values" ) UpperCAmelCase : Tuple = self.conv_stem(snake_case ) 
UpperCAmelCase : str = () if output_hidden_states else None for i, layer_module in enumerate(self.layer ): UpperCAmelCase : Union[str, Any] = layer_module(snake_case ) if output_hidden_states: UpperCAmelCase : int = all_hidden_states + (hidden_states,) UpperCAmelCase : Union[str, Any] = hidden_states if self.pooler is not None: UpperCAmelCase : int = torch.flatten(self.pooler(snake_case ) , start_dim=1 ) else: UpperCAmelCase : List[Any] = None if not return_dict: return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None ) return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=snake_case , pooler_output=snake_case , hidden_states=snake_case , ) @add_start_docstrings( "\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , lowercase__ , ) class UpperCamelCase__ ( lowercase__ ): """simple docstring""" def __init__( self , snake_case ): '''simple docstring''' super().__init__(snake_case ) UpperCAmelCase : Optional[Any] = config.num_labels UpperCAmelCase : Any = MobileNetVaModel(snake_case ) UpperCAmelCase : Union[str, Any] = self.mobilenet_va.layer[-1].convolution.out_channels # Classifier head UpperCAmelCase : int = nn.Dropout(config.classifier_dropout_prob , inplace=snake_case ) UpperCAmelCase : str = nn.Linear(snake_case , config.num_labels ) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(snake_case ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=snake_case , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def A_ ( self , snake_case = None , snake_case = None , snake_case = None , snake_case = None , ): '''simple docstring''' UpperCAmelCase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict UpperCAmelCase : int = 
self.mobilenet_va(snake_case , output_hidden_states=snake_case , return_dict=snake_case ) UpperCAmelCase : List[Any] = outputs.pooler_output if return_dict else outputs[1] UpperCAmelCase : Dict = self.classifier(self.dropout(snake_case ) ) UpperCAmelCase : Optional[int] = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: UpperCAmelCase : List[Any] = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): UpperCAmelCase : Dict = "single_label_classification" else: UpperCAmelCase : Optional[int] = "multi_label_classification" if self.config.problem_type == "regression": UpperCAmelCase : Union[str, Any] = MSELoss() if self.num_labels == 1: UpperCAmelCase : Union[str, Any] = loss_fct(logits.squeeze() , labels.squeeze() ) else: UpperCAmelCase : Tuple = loss_fct(snake_case , snake_case ) elif self.config.problem_type == "single_label_classification": UpperCAmelCase : str = CrossEntropyLoss() UpperCAmelCase : int = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": UpperCAmelCase : Union[str, Any] = BCEWithLogitsLoss() UpperCAmelCase : Optional[Any] = loss_fct(snake_case , snake_case ) if not return_dict: UpperCAmelCase : Any = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=snake_case , logits=snake_case , hidden_states=outputs.hidden_states , )
311
'''Bit-manipulation check for powers of two.'''


def is_power_of_two(number: int) -> bool:
    """Return True iff ``number`` is a positive power of two (1, 2, 4, ...).

    Uses the classic trick ``n & (n - 1) == 0``: subtracting one flips the
    lowest set bit and everything below it, so the AND is zero exactly when
    ``n`` has a single set bit.

    Raises:
        ValueError: if ``number`` is negative.

    >>> is_power_of_two(16)
    True
    >>> is_power_of_two(12)
    False
    """
    if number < 0:
        raise ValueError("number must not be negative")
    # Guard against 0: ``0 & -1 == 0`` would wrongly report 0 as a power of two.
    return number != 0 and number & (number - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
311
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

# Fix: every assignment used one mangled throwaway name while
# ``_LazyModule(...)`` read ``_import_structure`` -> NameError on import,
# and each backend branch *replaced* the mapping instead of extending it.
# Lazy import structure: submodule name -> names exported from it.
_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip"] = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blip"] = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy loader.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
35
from __future__ import annotations

from typing import Any


def lowerCamelCase_(UpperCamelCase__: list) -> int:
    """Evaluate a postfix (reverse Polish) expression of integer tokens.

    Division truncates toward zero (C-style), built on top of Python's
    floor division.  An empty expression evaluates to 0.

    :param UpperCamelCase__: tokens, e.g. ``["2", "1", "+", "3", "*"]``
    :return: the integer value of the expression
    :raises IndexError: if the expression is malformed (operand underflow)
    :raises ValueError: if a non-operator token is not an integer literal
    """
    if not UpperCamelCase__:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[int] = []

    for token in UpperCamelCase__:
        if token in operations:
            # First pop is the right-hand operand, second the left-hand one.
            # (The mangled original assigned both pops to one name and then
            # read undefined `a`/`b`.)
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # Floor division rounds toward -inf; bump by one when the
                # signs differ and there is a remainder to truncate toward 0.
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
35
1
from math import atan, cos, radians, sin, tan

from .haversine_distance import haversine_distance

# WGS-84 reference ellipsoid, in metres.  The mangled original bound all
# three constants to `_A` while the code read AXIS_A / AXIS_B /
# EQUATORIAL_RADIUS, which were undefined.
AXIS_A = 6_378_137.0        # equatorial semi-major axis
AXIS_B = 6_356_752.314_245  # polar semi-minor axis
EQUATORIAL_RADIUS = 637_8137


def _UpperCAmelCase(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Approximate the surface distance in metres between two points on the
    WGS-84 ellipsoid using Lambert's formula.

    Coordinates are in degrees.  The original signature repeated one mangled
    parameter name four times (a SyntaxError) and collapsed both parametric
    latitudes into a single variable, so P and Q degenerated.

    NOTE(review): identical points give sigma == 0 and a ZeroDivisionError in
    the Y term, matching the upstream algorithm — guard at the call site.
    """
    # Flattening of the ellipsoid.
    flattening = (AXIS_A - AXIS_B) / AXIS_A

    # Parametric (reduced) latitudes.
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Central angle between the two points:
    # sigma = haversine distance / equatorial radius.
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values.
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # X = (sigma - sin(sigma)) * sin^2(P)cos^2(Q) / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Y = (sigma + sin(sigma)) * cos^2(P)sin^2(Q) / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
62
'''
Generic PyTorch Lightning harness for the transformers examples: a
LightningModule wrapper around Auto* model classes, logging/checkpoint
callbacks, argparse helpers, and a trainer-construction entry point.

NOTE(review): this file has been through an identifier-mangling pass.  Module
constants are all rebound to the single name `A`, every method is named `A`,
locals are all assigned to `UpperCAmelCase`, several classes share the name
`_a`, some signatures repeat one parameter name (a SyntaxError), and bodies
read the *original* names (`arg_to_scheduler`, `model`, `optimizer`, `args`,
...) that no longer exist.  As written the module cannot run; only
documentation was added here — no code tokens were changed.
'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict

import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
    AdamW,
    AutoConfig,
    AutoModel,
    AutoModelForPreTraining,
    AutoModelForQuestionAnswering,
    AutoModelForSeqaSeqLM,
    AutoModelForSequenceClassification,
    AutoModelForTokenClassification,
    AutoModelWithLMHead,
    AutoTokenizer,
    PretrainedConfig,
    PreTrainedTokenizer,
)
from transformers.optimization import (
    Adafactor,
    get_cosine_schedule_with_warmup,
    get_cosine_with_hard_restarts_schedule_with_warmup,
    get_linear_schedule_with_warmup,
    get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version

# NOTE(review): the next four top-level assignments all rebind `A`; code
# below expects `logger`, `MODEL_MODES`, `arg_to_scheduler`,
# `arg_to_scheduler_choices` and `arg_to_scheduler_metavar` — confirm
# against the original lightning_base.py.
A = logging.getLogger(__name__)

require_version('pytorch_lightning>=1.0.4')

# Task name -> Auto model class used to instantiate the backbone.
A = {
    'base': AutoModel,
    'sequence-classification': AutoModelForSequenceClassification,
    'question-answering': AutoModelForQuestionAnswering,
    'pretraining': AutoModelForPreTraining,
    'token-classification': AutoModelForTokenClassification,
    'language-modeling': AutoModelWithLMHead,
    'summarization': AutoModelForSeqaSeqLM,
    'translation': AutoModelForSeqaSeqLM,
}

# update this and the import above to support new schedulers from
# transformers.optimization
A = {
    'linear': get_linear_schedule_with_warmup,
    'cosine': get_cosine_schedule_with_warmup,
    'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
    'polynomial': get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule,             # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
A = sorted(arg_to_scheduler.keys())
A = '{' + ', '.join(arg_to_scheduler_choices) + '}'


class _a ( pl.LightningModule ):
    '''LightningModule wrapping a transformers Auto* model, tokenizer and
    config for a given task ``mode``.

    NOTE(review): all six keyword parameters below were mangled to the one
    name ``lowercase`` (invalid Python); originally they were ``hparams,
    num_labels, mode, config, tokenizer, model`` — confirm upstream.
    '''

    def __init__( self : List[str] , lowercase : argparse.Namespace , lowercase : List[Any]=None , lowercase : Dict="base" , lowercase : Optional[int]=None , lowercase : Dict=None , lowercase : Tuple=None , **lowercase : Optional[int] , ):
        '''Load (or accept) config, tokenizer and model; copy any dropout /
        layerdrop overrides from hparams into the model config.'''
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(lowercase )
        # NOTE(review): the following locals were originally
        # self.step_count / self.output_dir / cache_dir / self.config ...
        UpperCAmelCase = 0
        UpperCAmelCase = Path(self.hparams.output_dir )
        UpperCAmelCase = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            UpperCAmelCase = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path ,
                **({'''num_labels''': num_labels} if num_labels is not None else {}) ,
                cache_dir=lowercase ,
                **lowercase ,
            )
        else:
            UpperCAmelCase = config

        # Hyperparameters that, when set, are forwarded into the model config.
        UpperCAmelCase = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
        for p in extra_model_params:
            if getattr(self.hparams , lowercase , lowercase ):
                assert hasattr(self.config , lowercase ), f"model config doesn't have a `{p}` attribute"
                setattr(self.config , lowercase , getattr(self.hparams , lowercase ) )

        if tokenizer is None:
            UpperCAmelCase = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path ,
                cache_dir=lowercase ,
            )
        else:
            UpperCAmelCase = tokenizer
        UpperCAmelCase = MODEL_MODES[mode]
        if model is None:
            UpperCAmelCase = self.model_type.from_pretrained(
                self.hparams.model_name_or_path ,
                from_tf=bool('''.ckpt''' in self.hparams.model_name_or_path ) ,
                config=self.config ,
                cache_dir=lowercase ,
            )
        else:
            UpperCAmelCase = model

    # NOTE(review): every method below is named `A` (originally
    # load_hf_checkpoint, get_lr_scheduler, configure_optimizers, ...),
    # so each redefinition shadows the previous one.
    def A ( self : List[Any] , *lowercase : List[str] , **lowercase : List[str] ):
        '''Reload the underlying HF model from a checkpoint path.'''
        UpperCAmelCase = self.model_type.from_pretrained(*lowercase , **lowercase )

    def A ( self : Tuple ):
        '''Build the per-step LR scheduler selected by hparams.lr_scheduler.'''
        UpperCAmelCase = arg_to_scheduler[self.hparams.lr_scheduler]
        UpperCAmelCase = get_schedule_func(
            self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
        UpperCAmelCase = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1}
        return scheduler

    def A ( self : str ):
        '''configure_optimizers: Adafactor or AdamW with weight-decay split
        (no decay on bias / LayerNorm weights), plus the LR scheduler.'''
        UpperCAmelCase = self.model
        UpperCAmelCase = ['''bias''', '''LayerNorm.weight''']
        UpperCAmelCase = [
            {
                '''params''': [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
                ],  # check this named parameters
                '''weight_decay''': self.hparams.weight_decay,
            },
            {
                '''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
                '''weight_decay''': 0.0,
            },
        ]
        if self.hparams.adafactor:
            UpperCAmelCase = Adafactor(
                lowercase , lr=self.hparams.learning_rate , scale_parameter=lowercase , relative_step=lowercase )
        else:
            UpperCAmelCase = AdamW(
                lowercase , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
        UpperCAmelCase = optimizer
        UpperCAmelCase = self.get_lr_scheduler()
        return [optimizer], [scheduler]

    def A ( self : List[Any] , lowercase : int , lowercase : List[str] ):
        '''test_step delegates to validation_step.'''
        return self.validation_step(lowercase , lowercase )

    def A ( self : List[Any] , lowercase : Tuple ):
        '''test_epoch_end delegates to validation_end.'''
        return self.validation_end(lowercase )

    def A ( self : List[Any] ):
        '''Estimate total optimizer steps for the scheduler horizon.'''
        UpperCAmelCase = max(1 , self.hparams.gpus )  # TODO: consider num_tpu_cores
        UpperCAmelCase = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def A ( self : List[str] , lowercase : Any ):
        '''setup hook: record dataset_size for the relevant stage.'''
        if stage == "test":
            UpperCAmelCase = len(self.test_dataloader().dataset )
        else:
            UpperCAmelCase = self.get_dataloader('''train''' , self.hparams.train_batch_size , shuffle=lowercase )
            UpperCAmelCase = len(self.train_dataloader().dataset )

    def A ( self : List[str] , lowercase : str , lowercase : int , lowercase : bool = False ):
        '''Abstract dataloader factory; tasks must override.'''
        raise NotImplementedError('''You must implement this for your task''' )

    def A ( self : Union[str, Any] ):
        '''train_dataloader: return the loader created in setup().'''
        return self.train_loader

    def A ( self : Optional[Any] ):
        '''val_dataloader over the dev split.'''
        return self.get_dataloader('''dev''' , self.hparams.eval_batch_size , shuffle=lowercase )

    def A ( self : List[Any] ):
        '''test_dataloader over the test split.'''
        return self.get_dataloader('''test''' , self.hparams.eval_batch_size , shuffle=lowercase )

    def A ( self : Any , lowercase : Union[str, Any] ):
        '''Path of the cached-features file for a given split.'''
        return os.path.join(
            self.hparams.data_dir ,
            '''cached_{}_{}_{}'''.format(
                lowercase ,
                list(filter(lowercase , self.hparams.model_name_or_path.split('''/''' ) ) ).pop() ,
                str(self.hparams.max_seq_length ) ,
            ) ,
        )

    @pl.utilities.rank_zero_only
    def A ( self : List[str] , lowercase : Dict[str, Any] ):
        '''on_save_checkpoint: also export model + tokenizer in HF format
        (rank zero only).'''
        UpperCAmelCase = self.output_dir.joinpath('''best_tfmr''' )
        UpperCAmelCase = self.step_count
        self.model.save_pretrained(lowercase )
        self.tokenizer.save_pretrained(lowercase )

    @staticmethod
    def A ( lowercase : Optional[int] , lowercase : List[str] ):
        '''Register the model-specific CLI arguments on the given parser.'''
        parser.add_argument(
            '''--model_name_or_path''' ,
            default=lowercase ,
            type=lowercase ,
            required=lowercase ,
            help='''Path to pretrained model or model identifier from huggingface.co/models''' ,
        )
        parser.add_argument(
            '''--config_name''' , default='''''' , type=lowercase , help='''Pretrained config name or path if not the same as model_name''' )
        parser.add_argument(
            '''--tokenizer_name''' ,
            default=lowercase ,
            type=lowercase ,
            help='''Pretrained tokenizer name or path if not the same as model_name''' ,
        )
        parser.add_argument(
            '''--cache_dir''' ,
            default=str(Path(lowercase ).parent / '''test_run''' / '''cache''' ) ,
            type=lowercase ,
            help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' ,
        )
        parser.add_argument(
            '''--encoder_layerdrop''' ,
            type=lowercase ,
            help='''Encoder layer dropout probability (Optional). Goes into model.config''' ,
        )
        parser.add_argument(
            '''--decoder_layerdrop''' ,
            type=lowercase ,
            help='''Decoder layer dropout probability (Optional). Goes into model.config''' ,
        )
        parser.add_argument(
            '''--dropout''' ,
            type=lowercase ,
            help='''Dropout probability (Optional). Goes into model.config''' ,
        )
        parser.add_argument(
            '''--attention_dropout''' ,
            type=lowercase ,
            help='''Attention dropout probability (Optional). Goes into model.config''' ,
        )
        parser.add_argument('''--learning_rate''' , default=5E-5 , type=lowercase , help='''The initial learning rate for Adam.''' )
        parser.add_argument(
            '''--lr_scheduler''' ,
            default='''linear''' ,
            choices=lowercase ,
            metavar=lowercase ,
            type=lowercase ,
            help='''Learning rate scheduler''' ,
        )
        parser.add_argument('''--weight_decay''' , default=0.0 , type=lowercase , help='''Weight decay if we apply some.''' )
        parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=lowercase , help='''Epsilon for Adam optimizer.''' )
        parser.add_argument('''--warmup_steps''' , default=0 , type=lowercase , help='''Linear warmup over warmup_steps.''' )
        parser.add_argument('''--num_workers''' , default=4 , type=lowercase , help='''kwarg passed to DataLoader''' )
        parser.add_argument('''--num_train_epochs''' , dest='''max_epochs''' , default=3 , type=lowercase )
        parser.add_argument('''--train_batch_size''' , default=32 , type=lowercase )
        parser.add_argument('''--eval_batch_size''' , default=32 , type=lowercase )
        parser.add_argument('''--adafactor''' , action='''store_true''' )


class _a ( pl.Callback ):
    '''Callback that initializes the RAG retriever on the master worker.
    NOTE(review): class name mangled — originally InitCallback (referenced
    below in the trainer-construction function).'''

    def A ( self : Dict , lowercase : Optional[Any] , lowercase : List[Any] ):
        '''on_sanity_check hook: init retrieval once on global rank zero.'''
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class _a ( pl.Callback ):
    '''Debug callback: print parameters that received no gradient.'''

    def A ( self : Optional[int] , lowercase : Union[str, Any] , lowercase : Any ):
        '''After backward, report RAG parameters whose .grad is None.'''
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(lowercase )


class _a ( pl.Callback ):
    '''Logging callback: per-group LR logging plus validation/test result
    reporting.  NOTE(review): originally LoggingCallback (referenced below).'''

    def A ( self : Optional[int] , lowercase : Optional[int] , lowercase : Dict ):
        '''Log the current learning rate of every optimizer param group.'''
        UpperCAmelCase = trainer.lr_schedulers[0]['''scheduler''']
        UpperCAmelCase = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr() )}
        pl_module.logger.log_metrics(lowercase )

    def A ( self : Tuple , lowercase : pl.Trainer , lowercase : pl.LightningModule ):
        '''on_validation_end: print sorted callback metrics.'''
        rank_zero_info('''***** Validation results *****''' )
        UpperCAmelCase = trainer.callback_metrics
        # Log results
        for key in sorted(lowercase ):
            if key not in ["log", "progress_bar"]:
                rank_zero_info('''{} = {}\n'''.format(lowercase , str(metrics[key] ) ) )

    def A ( self : Dict , lowercase : pl.Trainer , lowercase : pl.LightningModule ):
        '''on_test_end: print metrics and persist them to test_results.txt.'''
        rank_zero_info('''***** Test results *****''' )
        UpperCAmelCase = trainer.callback_metrics
        # Log and save results to file
        UpperCAmelCase = os.path.join(pl_module.hparams.output_dir , '''test_results.txt''' )
        with open(lowercase , '''w''' ) as writer:
            for key in sorted(lowercase ):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info('''{} = {}\n'''.format(lowercase , str(metrics[key] ) ) )
                    writer.write('''{} = {}\n'''.format(lowercase , str(metrics[key] ) ) )


def snake_case_ (_a : int , _a : Optional[Any] ):
    '''Register the generic (task-independent) CLI arguments on the parser.
    NOTE(review): both parameters were mangled to `_a` (originally
    ``parser, root_dir``).'''
    # To allow all pl args uncomment the following line
    # parser = pl.Trainer.add_argparse_args(parser)
    parser.add_argument(
        '''--output_dir''' ,
        default=str(Path(_a ).parent / '''test_run''' / '''model_checkpoints''' ) ,
        type=_a ,
        help='''The output directory where the model predictions and checkpoints will be written.''' ,
    )
    parser.add_argument(
        '''--fp16''' ,
        action='''store_true''' ,
        help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' ,
    )
    parser.add_argument(
        '''--fp16_opt_level''' ,
        type=_a ,
        default='''O2''' ,
        help=(
            '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
            '''See details at https://nvidia.github.io/apex/amp.html'''
        ) ,
    )
    parser.add_argument('''--n_tpu_cores''' , dest='''tpu_cores''' , type=_a )
    parser.add_argument('''--max_grad_norm''' , dest='''gradient_clip_val''' , default=1.0 , type=_a , help='''Max gradient norm''' )
    parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
    parser.add_argument('''--do_predict''' , action='''store_true''' , help='''Whether to run predictions on the test set.''' )
    parser.add_argument(
        '''--gradient_accumulation_steps''' ,
        dest='''accumulate_grad_batches''' ,
        type=_a ,
        default=1 ,
        help='''Number of updates steps to accumulate before performing a backward/update pass.''' ,
    )
    parser.add_argument('''--seed''' , type=_a , default=4_2 , help='''random seed for initialization''' )
    parser.add_argument(
        '''--data_dir''' ,
        default=str(Path(_a ).parent / '''test_run''' / '''dummy-train-data''' ) ,
        type=_a ,
        help='''The input data dir. Should contain the training files for the CoNLL-2003 NER task.''' ,
    )


def snake_case_ (_a : BaseTransformer , _a : argparse.Namespace , _a : List[Any]=None , _a : Tuple=True , _a : int=[] , _a : Any=None , _a : int=None , **_a : Optional[Any] , ):
    '''Build a pl.Trainer for `model` and run training if --do_train.

    NOTE(review): every parameter was mangled to `_a` (originally ``model,
    args, early_stopping_callback, logger, extra_callbacks,
    checkpoint_callback, logging_callback``), the mutable default ``[]`` is
    shared across calls, `args.fpaa` is presumably `args.fp16`, and the
    `UpperCAmelCase` locals have lost their distinct names
    (odir/checkpoint_callback/train_params/.../trainer) — confirm upstream.
    '''
    pl.seed_everything(args.seed )

    # init model
    UpperCAmelCase = Path(model.hparams.output_dir )
    odir.mkdir(exist_ok=_a )

    # add custom checkpoints
    if checkpoint_callback is None:
        UpperCAmelCase = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir , prefix='''checkpoint''' , monitor='''val_loss''' , mode='''min''' , save_top_k=1 )
    if early_stopping_callback:
        extra_callbacks.append(_a )
    if logging_callback is None:
        UpperCAmelCase = LoggingCallback()

    UpperCAmelCase = {}
    if args.fpaa:
        UpperCAmelCase = 1_6
    if args.gpus > 1:
        UpperCAmelCase = '''auto'''
        UpperCAmelCase = '''ddp'''
    UpperCAmelCase = args.accumulate_grad_batches
    UpperCAmelCase = None
    UpperCAmelCase = '''auto'''

    UpperCAmelCase = pl.Trainer.from_argparse_args(
        _a ,
        weights_summary=_a ,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] ,
        logger=_a ,
        val_check_interval=1 ,
        num_sanity_val_steps=2 ,
        **_a ,
    )

    if args.do_train:
        trainer.fit(_a )
    else:
        print('''RAG modeling tests with new set functions successfuly executed!''' )
    return trainer
34
0
'''Random-pivot quicksort (in place).'''

import random


def partition(a: list, left_index: int, right_index: int) -> int:
    """Lomuto-style partition of ``a[left_index:right_index]`` around the
    pivot ``a[left_index]``.

    Returns the final index of the pivot.  (The mangled original assigned
    each swap's tuple to a throwaway local instead of swapping in place, so
    the array was never reordered.)
    """
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    # Put the pivot into its final position.
    a[i - 1], a[left_index] = a[left_index], a[i - 1]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    """Sort ``a[left:right]`` in place using quicksort with a random pivot."""
    if left < right:
        pivot = random.randint(left, right - 1)
        # Switch the pivot with the left-most bound before partitioning.
        a[pivot], a[left] = a[left], a[pivot]
        pivot_index = partition(a, left, right)
        quick_sort_random(a, left, pivot_index)  # recurse left of the pivot
        quick_sort_random(a, pivot_index + 1, right)  # recurse right of the pivot


def main() -> None:
    """Read comma-separated integers from stdin, sort them, print the result."""
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    quick_sort_random(unsorted, 0, len(unsorted))
    print(unsorted)


if __name__ == "__main__":
    main()
355
'''Percentual-proximity multi-criteria scoring (a weighted min-max
normalization over the columns of a data table).'''

from __future__ import annotations


def get_data(source_data: list) -> list[list[float]]:
    """Transpose the rows of ``source_data`` into per-column float lists.

    (The mangled original assigned the accumulator to a throwaway name and
    then appended to the undefined ``data_lists``.)
    """
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Min-max normalize each column.

    weight 1: higher is better -> (x - min) / (max - min)
    weight 0: lower is better  -> 1 - (x - min) / (max - min)

    A constant column (max == min) scores 1 for weight 0 and 0 for weight 1.

    :raises ValueError: for any weight other than 0 or 1
    """
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"""Invalid weight of {weight:f} provided"""
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-column scores into one final score per row."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Score every row of ``source_data`` and append its final score in place.

    ``weights[i]`` selects the normalization direction for column ``i``
    (0 = lower is better, 1 = higher is better).  Returns ``source_data``
    with one extra score element per row.  (The mangled original called
    these helpers under their real names, which no longer existed.)
    """
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
334
0