code
stringlengths
82
53.2k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { 'xlm-mlm-en-2048': 'https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json', 'xlm-mlm-ende-1024': 'https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json', 'xlm-mlm-enfr-1024': 'https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json', 'xlm-mlm-enro-1024': 'https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json', 'xlm-mlm-tlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json', 'xlm-mlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json', 'xlm-clm-enfr-1024': 'https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json', 'xlm-clm-ende-1024': 'https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json', 'xlm-mlm-17-1280': 'https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json', 'xlm-mlm-100-1280': 'https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json', } class lowerCamelCase__ ( UpperCAmelCase ): """simple docstring""" _UpperCamelCase : Optional[int] = 'xlm' _UpperCamelCase : str = { 'hidden_size': 'emb_dim', 'num_attention_heads': 'n_heads', 'num_hidden_layers': 'n_layers', 'n_words': 'vocab_size', # For backward compatibility } def __init__( self , snake_case=30145 , snake_case=2048 , snake_case=12 , snake_case=16 , snake_case=0.1 , snake_case=0.1 , snake_case=True , snake_case=False , snake_case=False , snake_case=False , snake_case=1 , snake_case=True , snake_case=512 , snake_case=2048**-0.5 , snake_case=1E-1_2 , snake_case=0.02 , snake_case=0 , snake_case=1 , snake_case=2 , snake_case=3 , snake_case=5 , snake_case=True , snake_case="first" , snake_case=True , snake_case=None , snake_case=True , snake_case=0.1 , snake_case=5 , snake_case=5 , snake_case=0 , snake_case=0 , 
snake_case=2 , snake_case=0 , **snake_case , ): '''simple docstring''' UpperCamelCase__ = vocab_size UpperCamelCase__ = emb_dim UpperCamelCase__ = n_layers UpperCamelCase__ = n_heads UpperCamelCase__ = dropout UpperCamelCase__ = attention_dropout UpperCamelCase__ = gelu_activation UpperCamelCase__ = sinusoidal_embeddings UpperCamelCase__ = causal UpperCamelCase__ = asm UpperCamelCase__ = n_langs UpperCamelCase__ = use_lang_emb UpperCamelCase__ = layer_norm_eps UpperCamelCase__ = bos_index UpperCamelCase__ = eos_index UpperCamelCase__ = pad_index UpperCamelCase__ = unk_index UpperCamelCase__ = mask_index UpperCamelCase__ = is_encoder UpperCamelCase__ = max_position_embeddings UpperCamelCase__ = embed_init_std UpperCamelCase__ = init_std UpperCamelCase__ = summary_type UpperCamelCase__ = summary_use_proj UpperCamelCase__ = summary_activation UpperCamelCase__ = summary_proj_to_labels UpperCamelCase__ = summary_first_dropout UpperCamelCase__ = start_n_top UpperCamelCase__ = end_n_top UpperCamelCase__ = mask_token_id UpperCamelCase__ = lang_id if "n_words" in kwargs: UpperCamelCase__ = kwargs["n_words"] super().__init__(pad_token_id=snake_case , bos_token_id=snake_case , **snake_case ) class lowerCamelCase__ ( UpperCAmelCase ): """simple docstring""" @property def snake_case__ ( self ): '''simple docstring''' if self.task == "multiple-choice": UpperCamelCase__ = {0: "batch", 1: "choice", 2: "sequence"} else: UpperCamelCase__ = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis), ] )
551
import json import logging import os import sys from pathlib import Path import finetune_rag from transformers.file_utils import is_apex_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, require_ray, require_torch_gpu, require_torch_multi_gpu, ) logging.basicConfig(level=logging.DEBUG) __UpperCamelCase = logging.getLogger() __UpperCamelCase = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class lowerCamelCase__ ( UpperCAmelCase ): """simple docstring""" def snake_case__ ( self , snake_case ): '''simple docstring''' os.makedirs(snake_case , exist_ok=snake_case ) UpperCamelCase__ = {"source": "What is love ?", "target": "life"} UpperCamelCase__ = {"train": 12, "val": 2, "test": 2} for split in ["train", "test", "val"]: for field in ["source", "target"]: UpperCamelCase__ = "\n".join([contents[field]] * n_lines[split] ) with open(os.path.join(snake_case , F'''{split}.{field}''' ) , "w" ) as f: f.write(snake_case ) def snake_case__ ( self , snake_case , snake_case = "pytorch" ): '''simple docstring''' UpperCamelCase__ = self.get_auto_remove_tmp_dir() UpperCamelCase__ = os.path.join(snake_case , "output" ) UpperCamelCase__ = os.path.join(snake_case , "data" ) self._create_dummy_data(data_dir=snake_case ) UpperCamelCase__ = F''' --data_dir {data_dir} \ --output_dir {output_dir} \ --model_name_or_path facebook/rag-sequence-base \ --model_type rag_sequence \ --do_train \ --do_predict \ --n_val -1 \ --val_check_interval 1.0 \ --train_batch_size 2 \ --eval_batch_size 1 \ --max_source_length 25 \ --max_target_length 25 \ --val_max_target_length 25 \ --test_max_target_length 25 \ --label_smoothing 0.1 \ --dropout 0.1 \ --attention_dropout 0.1 \ --weight_decay 0.001 \ --adam_epsilon 1e-08 \ --max_grad_norm 0.1 \ --lr_scheduler polynomial \ --learning_rate 3e-04 \ --num_train_epochs 1 \ --warmup_steps 4 \ --gradient_accumulation_steps 1 \ --distributed-port 8787 \ --use_dummy_dataset 1 \ --distributed_retriever 
{distributed_retriever} \ '''.split() if gpus > 0: testargs.append(F'''--gpus={gpus}''' ) if is_apex_available(): testargs.append("--fp16" ) else: testargs.append("--gpus=0" ) testargs.append("--distributed_backend=ddp_cpu" ) testargs.append("--num_processes=2" ) UpperCamelCase__ = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs execute_subprocess_async(snake_case , env=self.get_env() ) UpperCamelCase__ = os.path.join(snake_case , "metrics.json" ) with open(snake_case ) as f: UpperCamelCase__ = json.load(snake_case ) return result @require_torch_gpu def snake_case__ ( self ): '''simple docstring''' UpperCamelCase__ = self._run_finetune(gpus=1 ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 ) @require_torch_multi_gpu def snake_case__ ( self ): '''simple docstring''' UpperCamelCase__ = self._run_finetune(gpus=2 ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 ) @require_torch_gpu @require_ray def snake_case__ ( self ): '''simple docstring''' UpperCamelCase__ = self._run_finetune(gpus=1 , distributed_retriever="ray" ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 ) @require_torch_multi_gpu @require_ray def snake_case__ ( self ): '''simple docstring''' UpperCamelCase__ = self._run_finetune(gpus=1 , distributed_retriever="ray" ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
551
1
'''simple docstring''' import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() a : List[Any] = logging.get_logger(__name__) a : Tuple = ["""model.decoder.embed_positions.weights"""] def __lowerCamelCase ( _lowercase ) -> Optional[Any]: if "emb" in name: UpperCAmelCase : List[str] = name.replace("""emb""" , """model.decoder.embed_tokens""" ) if "transformer" in name: UpperCAmelCase : Optional[int] = name.replace("""transformer""" , """model.decoder""" ) if "cross_attention" in name: UpperCAmelCase : Optional[int] = name.replace("""cross_attention""" , """encoder_attn""" ) if "linear1" in name: UpperCAmelCase : Optional[Any] = name.replace("""linear1""" , """fc1""" ) if "linear2" in name: UpperCAmelCase : Union[str, Any] = name.replace("""linear2""" , """fc2""" ) if "norm1" in name: UpperCAmelCase : Optional[Any] = name.replace("""norm1""" , """self_attn_layer_norm""" ) if "norm_cross" in name: UpperCAmelCase : Dict = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" ) if "norm2" in name: UpperCAmelCase : Union[str, Any] = name.replace("""norm2""" , """final_layer_norm""" ) if "out_norm" in name: UpperCAmelCase : Union[str, Any] = name.replace("""out_norm""" , """model.decoder.layer_norm""" ) if "linears" in name: UpperCAmelCase : List[str] = name.replace("""linears""" , """lm_heads""" ) if "condition_provider.conditioners.description.output_proj" in name: UpperCAmelCase : Any = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" ) return name def __lowerCamelCase ( _lowercase , _lowercase ) -> Tuple[Dict, Dict]: 
UpperCAmelCase : str = list(state_dict.keys() ) UpperCAmelCase : Optional[Any] = {} for key in keys: UpperCAmelCase : Optional[int] = state_dict.pop(lowercase_ ) UpperCAmelCase : List[Any] = rename_keys(lowercase_ ) if "in_proj_weight" in key: # split fused qkv proj UpperCAmelCase : Tuple = val[:hidden_size, :] UpperCAmelCase : Optional[Any] = val[hidden_size : 2 * hidden_size, :] UpperCAmelCase : Optional[Any] = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: UpperCAmelCase : Optional[Any] = val else: UpperCAmelCase : List[str] = val return state_dict, enc_dec_proj_state_dict def __lowerCamelCase ( _lowercase ) -> MusicgenDecoderConfig: if checkpoint == "small": # default config values UpperCAmelCase : List[Any] = 1_0_2_4 UpperCAmelCase : List[str] = 2_4 UpperCAmelCase : Any = 1_6 elif checkpoint == "medium": UpperCAmelCase : Tuple = 1_5_3_6 UpperCAmelCase : Dict = 4_8 UpperCAmelCase : Tuple = 2_4 elif checkpoint == "large": UpperCAmelCase : int = 2_0_4_8 UpperCAmelCase : Optional[int] = 4_8 UpperCAmelCase : Dict = 3_2 else: raise ValueError(F'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' ) UpperCAmelCase : str = MusicgenDecoderConfig( hidden_size=lowercase_ , ffn_dim=hidden_size * 4 , num_hidden_layers=lowercase_ , num_attention_heads=lowercase_ , ) return config @torch.no_grad() def __lowerCamelCase ( _lowercase , _lowercase=None , _lowercase=None , _lowercase="cpu" ) -> List[str]: UpperCAmelCase : str = MusicGen.get_pretrained(lowercase_ , device=lowercase_ ) UpperCAmelCase : Union[str, Any] = decoder_config_from_checkpoint(lowercase_ ) UpperCAmelCase : Optional[int] = fairseq_model.lm.state_dict() UpperCAmelCase : Optional[Any] = rename_state_dict( lowercase_ , hidden_size=decoder_config.hidden_size ) UpperCAmelCase : Tuple = TaEncoderModel.from_pretrained("""t5-base""" ) UpperCAmelCase : Union[str, Any] = EncodecModel.from_pretrained("""facebook/encodec_32khz""" ) UpperCAmelCase : str = 
MusicgenForCausalLM(lowercase_ ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection UpperCAmelCase : str = decoder.load_state_dict(lowercase_ , strict=lowercase_ ) for key in missing_keys.copy(): if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(lowercase_ ) if len(lowercase_ ) > 0: raise ValueError(F'''Missing key(s) in state_dict: {missing_keys}''' ) if len(lowercase_ ) > 0: raise ValueError(F'''Unexpected key(s) in state_dict: {unexpected_keys}''' ) # init the composite model UpperCAmelCase : str = MusicgenForConditionalGeneration(text_encoder=lowercase_ , audio_encoder=lowercase_ , decoder=lowercase_ ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(lowercase_ ) # check we can do a forward pass UpperCAmelCase : List[str] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) UpperCAmelCase : Dict = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): UpperCAmelCase : Tuple = model(input_ids=lowercase_ , decoder_input_ids=lowercase_ ).logits if logits.shape != (8, 1, 2_0_4_8): raise ValueError("""Incorrect shape for logits""" ) # now construct the processor UpperCAmelCase : int = AutoTokenizer.from_pretrained("""t5-base""" ) UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" ) UpperCAmelCase : Optional[int] = MusicgenProcessor(feature_extractor=lowercase_ , tokenizer=lowercase_ ) # set the appropriate bos/pad token ids UpperCAmelCase : str = 2_0_4_8 UpperCAmelCase : str = 2_0_4_8 # set other default generation config params UpperCAmelCase : Optional[Any] = int(3_0 * audio_encoder.config.frame_rate ) UpperCAmelCase : List[str] = True UpperCAmelCase : int = 3.0 if pytorch_dump_folder is not None: Path(lowercase_ ).mkdir(exist_ok=lowercase_ ) logger.info(F'''Saving model {checkpoint} to {pytorch_dump_folder}''' ) 
model.save_pretrained(lowercase_ ) processor.save_pretrained(lowercase_ ) if repo_id: logger.info(F'''Pushing model {checkpoint} to {repo_id}''' ) model.push_to_hub(lowercase_ ) processor.push_to_hub(lowercase_ ) if __name__ == "__main__": a : str = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint""", default="""small""", type=str, help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""", ) parser.add_argument( """--pytorch_dump_folder""", required=True, default=None, type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub.""" ) parser.add_argument( """--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda.""" ) a : Optional[int] = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
707
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_barthez import BarthezTokenizer else: a : int = None a : List[Any] = logging.get_logger(__name__) a : Dict = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} a : Union[str, Any] = { """vocab_file""": { """moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""", """moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""", """moussaKam/barthez-orangesum-title""": ( """https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model""" ), }, """tokenizer_file""": { """moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""", """moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""", """moussaKam/barthez-orangesum-title""": ( """https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json""" ), }, } a : List[Any] = { """moussaKam/mbarthez""": 1_0_2_4, """moussaKam/barthez""": 1_0_2_4, """moussaKam/barthez-orangesum-title""": 1_0_2_4, } a : int = """▁""" class UpperCamelCase_ ( __magic_name__ ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = ['input_ids', 'attention_mask'] lowercase = BarthezTokenizer def __init__( self , A=None , A=None , A="<s>" , A="</s>" , A="</s>" , A="<s>" , A="<unk>" , A="<pad>" , A="<mask>" , **A , ) -> List[Any]: # Mask token behave like a normal word, i.e. 
include the space before it UpperCAmelCase : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token super().__init__( A , tokenizer_file=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , cls_token=A , pad_token=A , mask_token=A , **A , ) UpperCAmelCase : Union[str, Any] = vocab_file UpperCAmelCase : int = False if not self.vocab_file else True def _lowercase( self , A , A = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCAmelCase : Optional[int] = [self.cls_token_id] UpperCAmelCase : List[str] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _lowercase( self , A , A = None ) -> List[int]: UpperCAmelCase : Optional[int] = [self.sep_token_id] UpperCAmelCase : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _lowercase( self , A , A = None ) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(A ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCAmelCase : str = os.path.join( A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A ): copyfile(self.vocab_file , A ) return (out_vocab_file,)
672
0
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { """junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json""", """junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json""", """junnyu/roformer_chinese_char_small""": ( """https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json""" ), """junnyu/roformer_chinese_char_base""": ( """https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json""" ), """junnyu/roformer_small_discriminator""": ( """https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json""" ), """junnyu/roformer_small_generator""": ( """https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json""" ), # See all RoFormer models at https://huggingface.co/models?filter=roformer } class UpperCamelCase__ ( _lowerCAmelCase ): """simple docstring""" A__ : Optional[int] = "roformer" def __init__( self , SCREAMING_SNAKE_CASE__=50000 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=768 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=3072 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=1536 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.0_2 , SCREAMING_SNAKE_CASE__=1e-12 , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , **SCREAMING_SNAKE_CASE__ , ) -> Any: super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) A__ = vocab_size A__ = hidden_size if embedding_size is None else embedding_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = hidden_act 
A__ = intermediate_size A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = type_vocab_size A__ = initializer_range A__ = layer_norm_eps A__ = rotary_value A__ = use_cache class UpperCamelCase__ ( _lowerCAmelCase ): """simple docstring""" @property def snake_case__ ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": A__ = {0: "batch", 1: "choice", 2: "sequence"} else: A__ = {0: "batch", 1: "sequence"} A__ = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis), ] )
104
import unittest from dataclasses import dataclass import pytest from accelerate.commands.config.config_args import SageMakerConfig from accelerate.utils import ComputeEnvironment from accelerate.utils.launch import _convert_nargs_to_dict @dataclass class _snake_case ( lowercase__): UpperCamelCase__ : List[str] =ComputeEnvironment.AMAZON_SAGEMAKER UpperCamelCase__ : Tuple =True UpperCamelCase__ : int ="""ml.p3.2xlarge""" UpperCamelCase__ : Any ="""accelerate_sagemaker_execution_role""" UpperCamelCase__ : Dict ="""hf-sm""" UpperCamelCase__ : Optional[int] ="""us-east-1""" UpperCamelCase__ : Optional[Any] =1 UpperCamelCase__ : int ="""accelerate-sagemaker-1""" UpperCamelCase__ : Union[str, Any] ="""1.6""" UpperCamelCase__ : str ="""4.4""" UpperCamelCase__ : str ="""train.py""" UpperCamelCase__ : Union[str, Any] =[ """--model_name_or_path""", """bert""", """--do_train""", """False""", """--epochs""", """3""", """--learning_rate""", """5e-5""", """--max_steps""", """50.5""", ] UpperCamelCase__ : str =[ """--model_name_or_path""", """bert""", """--do_train""", """--do_test""", """False""", """--do_predict""", """--epochs""", """3""", """--learning_rate""", """5e-5""", """--max_steps""", """50.5""", ] class _snake_case ( unittest.TestCase): def A__ ( self : Optional[int] ): # If no defaults are changed, `to_kwargs` returns an empty dict. lowercase__ = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args ) assert isinstance(converted_args["model_name_or_path"], __lowercase ) assert isinstance(converted_args["do_train"], __lowercase ) assert isinstance(converted_args["epochs"], __lowercase ) assert isinstance(converted_args["learning_rate"], __lowercase ) assert isinstance(converted_args["max_steps"], __lowercase ) with pytest.raises(__lowercase ): _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
413
0
from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # noqa F403 else: from 
.scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
713
from pickle import UnpicklingError import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict from ..utils import logging __a = logging.get_logger(__name__) def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) ->Tuple: try: with open(lowerCAmelCase_ , """rb""" ) as flax_state_f: UpperCAmelCase = from_bytes(lowerCAmelCase_ , flax_state_f.read() ) except UnpicklingError as e: try: with open(lowerCAmelCase_ ) as f: if f.read().startswith("""version""" ): raise OSError( """You seem to have cloned a repository without having git-lfs installed. Please""" """ install git-lfs and run `git lfs install` followed by `git lfs pull` in the""" """ folder you cloned.""" ) else: raise ValueError from e except (UnicodeDecodeError, ValueError): raise EnvironmentError(F"""Unable to convert {model_file} to Flax deserializable object. """ ) return load_flax_weights_in_pytorch_model(lowerCAmelCase_ , lowerCAmelCase_ ) def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) ->Dict: try: import torch # noqa: F401 except ImportError: logger.error( """Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see""" """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation""" """ instructions.""" ) raise # check if we have bf16 weights UpperCAmelCase = flatten_dict(jax.tree_util.tree_map(lambda lowerCAmelCase_ : x.dtype == jnp.bfloataa , lowerCAmelCase_ ) ).values() if any(lowerCAmelCase_ ): # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( """Found ``bfloat16`` weights in Flax model. 
Casting all ``bfloat16`` weights to ``float32`` """ """before loading those in PyTorch model.""" ) UpperCAmelCase = jax.tree_util.tree_map( lambda lowerCAmelCase_ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , lowerCAmelCase_ ) UpperCAmelCase = """""" UpperCAmelCase = flatten_dict(lowerCAmelCase_ , sep=""".""" ) UpperCAmelCase = pt_model.state_dict() # keep track of unexpected & missing keys UpperCAmelCase = [] UpperCAmelCase = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): UpperCAmelCase = flax_key_tuple.split(""".""" ) if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4: UpperCAmelCase = flax_key_tuple_array[:-1] + ["""weight"""] UpperCAmelCase = jnp.transpose(lowerCAmelCase_ , (3, 2, 0, 1) ) elif flax_key_tuple_array[-1] == "kernel": UpperCAmelCase = flax_key_tuple_array[:-1] + ["""weight"""] UpperCAmelCase = flax_tensor.T elif flax_key_tuple_array[-1] == "scale": UpperCAmelCase = flax_key_tuple_array[:-1] + ["""weight"""] if "time_embedding" not in flax_key_tuple_array: for i, flax_key_tuple_string in enumerate(lowerCAmelCase_ ): UpperCAmelCase = ( flax_key_tuple_string.replace("""_0""" , """.0""" ) .replace("""_1""" , """.1""" ) .replace("""_2""" , """.2""" ) .replace("""_3""" , """.3""" ) .replace("""_4""" , """.4""" ) .replace("""_5""" , """.5""" ) .replace("""_6""" , """.6""" ) .replace("""_7""" , """.7""" ) .replace("""_8""" , """.8""" ) .replace("""_9""" , """.9""" ) ) UpperCAmelCase = """.""".join(lowerCAmelCase_ ) if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( F"""Flax checkpoint seems to be incorrect. 
Weight {flax_key_tuple} was expected """ F"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" ) else: # add weight to pytorch dict UpperCAmelCase = np.asarray(lowerCAmelCase_ ) if not isinstance(lowerCAmelCase_ , np.ndarray ) else flax_tensor UpperCAmelCase = torch.from_numpy(lowerCAmelCase_ ) # remove from missing keys missing_keys.remove(lowerCAmelCase_ ) else: # weight is not expected by PyTorch model unexpected_keys.append(lowerCAmelCase_ ) pt_model.load_state_dict(lowerCAmelCase_ ) # re-transform missing_keys to list UpperCAmelCase = list(lowerCAmelCase_ ) if len(lowerCAmelCase_ ) > 0: logger.warning( """Some weights of the Flax model were not used when initializing the PyTorch model""" F""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing""" F""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture""" """ (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This""" F""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect""" """ to be exactly identical (e.g. initializing a BertForSequenceClassification model from a""" """ FlaxBertForSequenceClassification model).""" ) if len(lowerCAmelCase_ ) > 0: logger.warning( F"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly""" F""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to""" """ use it for predictions and inference.""" ) return pt_model
627
0
import argparse
import pathlib

import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version

from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
    BertIntermediate,
    BertLayer,
    BertOutput,
    BertSelfAttention,
    BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging


if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
    raise Exception("requires fairseq >= 1.0.0a")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"


def convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    """Copy the weights of a fairseq XLM-RoBERTa-XL checkpoint into a HF model,
    verify both produce the same output, and save the converted model.

    Args:
        roberta_checkpoint_path: Directory containing the fairseq checkpoint.
        pytorch_dump_folder_path: Output directory for the converted model.
        classification_head: If True, also convert the "mnli" classification head
            (otherwise the LM head is converted).

    Raises:
        Exception: If the converted model's outputs do not match the original's.
    """
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )

        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate
        # NOTE(review): the previous revision copied `fc1` into BOTH the
        # intermediate and the output projection; the output must come from `fc2`.
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    convert_xlm_roberta_xl_checkpoint_to_pytorch(
        args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
668
import json
import os
from typing import Dict, List, Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "tokenizer_config_file": "tokenizer_config.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
        ),
    },
    "tokenizer_config_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
        ),
    },
    "merges_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
        ),
    },
}

# NOTE(review): the previous revision bound both markers to the same name,
# silently clobbering the first. They are distinct: one marks word ends in
# merges, the other marks BPE continuation in the vocab.
BPE_TOKEN_MERGES = "</w>"
BPE_TOKEN_VOCAB = "@@ "


def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word` (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 1024}


class Speech2Text2Tokenizer(PreTrainedTokenizer):
    """BPE tokenizer for Speech2Text2.

    When constructed without a `merges_file` it can only be used for decoding
    (`_tokenize` raises in that case).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )

        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair merges to a single whitespace token, returning the
        space-joined BPE symbols with `@@ ` continuation markers."""
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked (most frequent) adjacent pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")

        word = word.replace(" ", BPE_TOKEN_VOCAB)
        # NOTE(review): the previous revision never wrote back to the cache,
        # defeating the memoization checked at the top of this method.
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split `text` on whitespace and BPE-encode each token.

        Raises:
            ValueError: If the tokenizer was built without a merges file.
        """
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding."
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding."
            )

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token (str) to an id using the vocab; unknown → unk id."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Convert an id to a token (str) using the vocab; unknown → unk token."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Join tokens into a string, fusing `@@ `-continued pieces."""
        string = " ".join(tokens)

        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))

        return string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write vocab.json (and merges.txt when merges are loaded) to
        `save_directory` and return the written paths."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return

        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)

        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return (vocab_file, merges_file)
97
0
"""simple docstring""" from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_mobilenet_va import MobileNetVaConfig snake_case__ : Dict = logging.get_logger(__name__) # General docstring snake_case__ : int = '''MobileNetV1Config''' # Base docstring snake_case__ : List[str] = '''google/mobilenet_v1_1.0_224''' snake_case__ : List[Any] = [1, 1_024, 7, 7] # Image classification docstring snake_case__ : Optional[int] = '''google/mobilenet_v1_1.0_224''' snake_case__ : List[Any] = '''tabby, tabby cat''' snake_case__ : Any = [ '''google/mobilenet_v1_1.0_224''', '''google/mobilenet_v1_0.75_192''', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 ] def _snake_case ( _snake_case : int , _snake_case : Tuple , _snake_case : Union[str, Any]=None ): lowerCAmelCase : str = {} if isinstance(_snake_case , _snake_case ): lowerCAmelCase : Union[str, Any] = model.mobilenet_va else: lowerCAmelCase : Optional[int] = model lowerCAmelCase : Dict = '''MobilenetV1/Conv2d_0/''' lowerCAmelCase : int = backbone.conv_stem.convolution.weight lowerCAmelCase : Dict = backbone.conv_stem.normalization.bias lowerCAmelCase : str = backbone.conv_stem.normalization.weight lowerCAmelCase : int = backbone.conv_stem.normalization.running_mean lowerCAmelCase : Dict = backbone.conv_stem.normalization.running_var for i in range(13 ): lowerCAmelCase : int = i + 1 lowerCAmelCase : Optional[int] = i * 2 lowerCAmelCase : List[Any] = backbone.layer[pt_index] lowerCAmelCase : Union[str, Any] = f'''MobilenetV1/Conv2d_{tf_index}_depthwise/''' lowerCAmelCase : Dict = 
pointer.convolution.weight lowerCAmelCase : int = pointer.normalization.bias lowerCAmelCase : str = pointer.normalization.weight lowerCAmelCase : Any = pointer.normalization.running_mean lowerCAmelCase : Union[str, Any] = pointer.normalization.running_var lowerCAmelCase : Dict = backbone.layer[pt_index + 1] lowerCAmelCase : List[Any] = f'''MobilenetV1/Conv2d_{tf_index}_pointwise/''' lowerCAmelCase : str = pointer.convolution.weight lowerCAmelCase : Any = pointer.normalization.bias lowerCAmelCase : Tuple = pointer.normalization.weight lowerCAmelCase : Optional[int] = pointer.normalization.running_mean lowerCAmelCase : Tuple = pointer.normalization.running_var if isinstance(_snake_case , _snake_case ): lowerCAmelCase : Optional[Any] = '''MobilenetV1/Logits/Conv2d_1c_1x1/''' lowerCAmelCase : List[Any] = model.classifier.weight lowerCAmelCase : int = model.classifier.bias return tf_to_pt_map def _snake_case ( _snake_case : int , _snake_case : Dict , _snake_case : List[str] ): try: import numpy as np import tensorflow as tf except ImportError: logger.error( '''Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. 
Please see ''' '''https://www.tensorflow.org/install/ for installation instructions.''' ) raise # Load weights from TF model lowerCAmelCase : Any = tf.train.list_variables(_snake_case ) lowerCAmelCase : Tuple = {} for name, shape in init_vars: logger.info(f'''Loading TF weight {name} with shape {shape}''' ) lowerCAmelCase : Any = tf.train.load_variable(_snake_case , _snake_case ) lowerCAmelCase : Tuple = array # Build TF to PyTorch weights loading map lowerCAmelCase : Optional[int] = _build_tf_to_pytorch_map(_snake_case , _snake_case , _snake_case ) for name, pointer in tf_to_pt_map.items(): logger.info(f'''Importing {name}''' ) if name not in tf_weights: logger.info(f'''{name} not in tf pre-trained weights, skipping''' ) continue lowerCAmelCase : Any = tf_weights[name] if "depthwise_weights" in name: logger.info('''Transposing depthwise''' ) lowerCAmelCase : str = np.transpose(_snake_case , (2, 3, 0, 1) ) elif "weights" in name: logger.info('''Transposing''' ) if len(pointer.shape ) == 2: # copying into linear layer lowerCAmelCase : Optional[Any] = array.squeeze().transpose() else: lowerCAmelCase : Dict = np.transpose(_snake_case , (3, 2, 0, 1) ) if pointer.shape != array.shape: raise ValueError(f'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''' ) logger.info(f'''Initialize PyTorch weight {name} {array.shape}''' ) lowerCAmelCase : Union[str, Any] = torch.from_numpy(_snake_case ) tf_weights.pop(_snake_case , _snake_case ) tf_weights.pop(name + '''/RMSProp''' , _snake_case ) tf_weights.pop(name + '''/RMSProp_1''' , _snake_case ) tf_weights.pop(name + '''/ExponentialMovingAverage''' , _snake_case ) logger.info(f'''Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}''' ) return model def _snake_case ( _snake_case : torch.Tensor , _snake_case : nn.Convad ): lowerCAmelCase, lowerCAmelCase : List[Any] = features.shape[-2:] lowerCAmelCase, lowerCAmelCase : str = conv_layer.stride lowerCAmelCase, lowerCAmelCase : Optional[int] = 
conv_layer.kernel_size if in_height % stride_height == 0: lowerCAmelCase : List[Any] = max(kernel_height - stride_height , 0 ) else: lowerCAmelCase : int = max(kernel_height - (in_height % stride_height) , 0 ) if in_width % stride_width == 0: lowerCAmelCase : Union[str, Any] = max(kernel_width - stride_width , 0 ) else: lowerCAmelCase : Optional[int] = max(kernel_width - (in_width % stride_width) , 0 ) lowerCAmelCase : Tuple = pad_along_width // 2 lowerCAmelCase : Dict = pad_along_width - pad_left lowerCAmelCase : str = pad_along_height // 2 lowerCAmelCase : Dict = pad_along_height - pad_top lowerCAmelCase : Any = (pad_left, pad_right, pad_top, pad_bottom) return nn.functional.pad(_snake_case , _snake_case , '''constant''' , 0.0 ) class snake_case_( nn.Module ): def __init__( self : Optional[Any] , UpperCamelCase_ : MobileNetVaConfig , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : Optional[int] = 1 , UpperCamelCase_ : Optional[int] = 1 , UpperCamelCase_ : bool = False , UpperCamelCase_ : Optional[bool] = True , UpperCamelCase_ : Optional[bool or str] = True , ): super().__init__() lowerCAmelCase : Any = config if in_channels % groups != 0: raise ValueError(F'''Input channels ({in_channels}) are not divisible by {groups} groups.''' ) if out_channels % groups != 0: raise ValueError(F'''Output channels ({out_channels}) are not divisible by {groups} groups.''' ) lowerCAmelCase : Any = 0 if config.tf_padding else int((kernel_size - 1) / 2 ) lowerCAmelCase : Tuple = nn.Convad( in_channels=UpperCamelCase_ , out_channels=UpperCamelCase_ , kernel_size=UpperCamelCase_ , stride=UpperCamelCase_ , padding=UpperCamelCase_ , groups=UpperCamelCase_ , bias=UpperCamelCase_ , padding_mode='''zeros''' , ) if use_normalization: lowerCAmelCase : str = nn.BatchNormad( num_features=UpperCamelCase_ , eps=config.layer_norm_eps , momentum=0.9_997 , affine=UpperCamelCase_ , track_running_stats=UpperCamelCase_ , ) else: lowerCAmelCase : Optional[Any] 
= None if use_activation: if isinstance(UpperCamelCase_ , UpperCamelCase_ ): lowerCAmelCase : Tuple = ACTaFN[use_activation] elif isinstance(config.hidden_act , UpperCamelCase_ ): lowerCAmelCase : Any = ACTaFN[config.hidden_act] else: lowerCAmelCase : Union[str, Any] = config.hidden_act else: lowerCAmelCase : List[str] = None def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : torch.Tensor ): if self.config.tf_padding: lowerCAmelCase : List[Any] = apply_tf_padding(UpperCamelCase_ , self.convolution ) lowerCAmelCase : Dict = self.convolution(UpperCamelCase_ ) if self.normalization is not None: lowerCAmelCase : Dict = self.normalization(UpperCamelCase_ ) if self.activation is not None: lowerCAmelCase : Any = self.activation(UpperCamelCase_ ) return features class snake_case_( a__ ): __UpperCamelCase = MobileNetVaConfig __UpperCamelCase = load_tf_weights_in_mobilenet_va __UpperCamelCase = '''mobilenet_v1''' __UpperCamelCase = '''pixel_values''' __UpperCamelCase = False def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : Union[nn.Linear, nn.Convad] ): if isinstance(UpperCamelCase_ , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(UpperCamelCase_ , nn.BatchNormad ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) snake_case__ : Optional[Any] = R''' This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. 
''' snake_case__ : List[str] = R''' Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`MobileNetV1ImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. ''' @add_start_docstrings( '''The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.''' , a__ , ) class snake_case_( a__ ): def __init__( self : Dict , UpperCamelCase_ : MobileNetVaConfig , UpperCamelCase_ : bool = True ): super().__init__(UpperCamelCase_ ) lowerCAmelCase : Any = config lowerCAmelCase : List[Any] = 3_2 lowerCAmelCase : Optional[Any] = max(int(depth * config.depth_multiplier ) , config.min_depth ) lowerCAmelCase : List[Any] = MobileNetVaConvLayer( UpperCamelCase_ , in_channels=config.num_channels , out_channels=UpperCamelCase_ , kernel_size=3 , stride=2 , ) lowerCAmelCase : Any = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1] lowerCAmelCase : Optional[int] = nn.ModuleList() for i in range(1_3 ): lowerCAmelCase : List[Any] = out_channels if strides[i] == 2 or i == 0: depth *= 2 lowerCAmelCase : List[Any] = max(int(depth * config.depth_multiplier ) , config.min_depth ) self.layer.append( MobileNetVaConvLayer( UpperCamelCase_ , in_channels=UpperCamelCase_ , out_channels=UpperCamelCase_ , kernel_size=3 , stride=strides[i] , groups=UpperCamelCase_ , ) ) self.layer.append( MobileNetVaConvLayer( UpperCamelCase_ , in_channels=UpperCamelCase_ , out_channels=UpperCamelCase_ , kernel_size=1 , ) ) lowerCAmelCase : str = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def lowerCamelCase__ ( self : int , UpperCamelCase_ : Dict ): raise 
NotImplementedError @add_start_docstrings_to_model_forward(UpperCamelCase_ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCamelCase_ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : Optional[torch.Tensor] = None , UpperCamelCase_ : Optional[bool] = None , UpperCamelCase_ : Optional[bool] = None , ): lowerCAmelCase : List[Any] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowerCAmelCase : Tuple = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('''You have to specify pixel_values''' ) lowerCAmelCase : str = self.conv_stem(UpperCamelCase_ ) lowerCAmelCase : Tuple = () if output_hidden_states else None for i, layer_module in enumerate(self.layer ): lowerCAmelCase : Optional[int] = layer_module(UpperCamelCase_ ) if output_hidden_states: lowerCAmelCase : Union[str, Any] = all_hidden_states + (hidden_states,) lowerCAmelCase : Any = hidden_states if self.pooler is not None: lowerCAmelCase : int = torch.flatten(self.pooler(UpperCamelCase_ ) , start_dim=1 ) else: lowerCAmelCase : Any = None if not return_dict: return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None ) return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=UpperCamelCase_ , pooler_output=UpperCamelCase_ , hidden_states=UpperCamelCase_ , ) @add_start_docstrings( ''' MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. 
''' , a__ , ) class snake_case_( a__ ): def __init__( self : Optional[Any] , UpperCamelCase_ : MobileNetVaConfig ): super().__init__(UpperCamelCase_ ) lowerCAmelCase : int = config.num_labels lowerCAmelCase : Union[str, Any] = MobileNetVaModel(UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = self.mobilenet_va.layer[-1].convolution.out_channels # Classifier head lowerCAmelCase : List[str] = nn.Dropout(config.classifier_dropout_prob , inplace=UpperCamelCase_ ) lowerCAmelCase : Optional[int] = nn.Linear(UpperCamelCase_ , config.num_labels ) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(UpperCamelCase_ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCamelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : Optional[torch.Tensor] = None , UpperCamelCase_ : Optional[bool] = None , UpperCamelCase_ : Optional[torch.Tensor] = None , UpperCamelCase_ : Optional[bool] = None , ): lowerCAmelCase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict lowerCAmelCase : int = self.mobilenet_va(UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , return_dict=UpperCamelCase_ ) lowerCAmelCase : int = outputs.pooler_output if return_dict else outputs[1] lowerCAmelCase : Optional[int] = self.classifier(self.dropout(UpperCamelCase_ ) ) lowerCAmelCase : Union[str, Any] = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: lowerCAmelCase : int = '''regression''' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): lowerCAmelCase : Union[str, Any] = '''single_label_classification''' else: lowerCAmelCase : Any = '''multi_label_classification''' if self.config.problem_type == "regression": lowerCAmelCase : Tuple = MSELoss() if self.num_labels == 1: 
lowerCAmelCase : int = loss_fct(logits.squeeze() , labels.squeeze() ) else: lowerCAmelCase : Union[str, Any] = loss_fct(UpperCamelCase_ , UpperCamelCase_ ) elif self.config.problem_type == "single_label_classification": lowerCAmelCase : Union[str, Any] = CrossEntropyLoss() lowerCAmelCase : int = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": lowerCAmelCase : Any = BCEWithLogitsLoss() lowerCAmelCase : Tuple = loss_fct(UpperCamelCase_ , UpperCamelCase_ ) if not return_dict: lowerCAmelCase : Tuple = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=UpperCamelCase_ , logits=UpperCamelCase_ , hidden_states=outputs.hidden_states , )
637
"""simple docstring""" from __future__ import annotations from PIL import Image # Define glider example snake_case__ : int = [ [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], ] # Define blinker example snake_case__ : Any = [[0, 1, 0], [0, 1, 0], [0, 1, 0]] def _snake_case ( _snake_case : list[list[int]] ): lowerCAmelCase : Union[str, Any] = [] for i in range(len(_snake_case ) ): lowerCAmelCase : Any = [] for j in range(len(cells[i] ) ): # Get the number of live neighbours lowerCAmelCase : Optional[int] = 0 if i > 0 and j > 0: neighbour_count += cells[i - 1][j - 1] if i > 0: neighbour_count += cells[i - 1][j] if i > 0 and j < len(cells[i] ) - 1: neighbour_count += cells[i - 1][j + 1] if j > 0: neighbour_count += cells[i][j - 1] if j < len(cells[i] ) - 1: neighbour_count += cells[i][j + 1] if i < len(_snake_case ) - 1 and j > 0: neighbour_count += cells[i + 1][j - 1] if i < len(_snake_case ) - 1: neighbour_count += cells[i + 1][j] if i < len(_snake_case ) - 1 and j < len(cells[i] ) - 1: neighbour_count += cells[i + 1][j + 1] # Rules of the game of life (excerpt from Wikipedia): # 1. Any live cell with two or three live neighbours survives. # 2. Any dead cell with three live neighbours becomes a live cell. # 3. All other live cells die in the next generation. # Similarly, all other dead cells stay dead. 
lowerCAmelCase : str = cells[i][j] == 1 if ( (alive and 2 <= neighbour_count <= 3) or not alive and neighbour_count == 3 ): next_generation_row.append(1 ) else: next_generation_row.append(0 ) next_generation.append(_snake_case ) return next_generation def _snake_case ( _snake_case : list[list[int]] , _snake_case : int ): lowerCAmelCase : int = [] for _ in range(_snake_case ): # Create output image lowerCAmelCase : Union[str, Any] = Image.new('''RGB''' , (len(cells[0] ), len(_snake_case )) ) lowerCAmelCase : Union[str, Any] = img.load() # Save cells to image for x in range(len(_snake_case ) ): for y in range(len(cells[0] ) ): lowerCAmelCase : Optional[int] = 255 - cells[y][x] * 255 lowerCAmelCase : List[Any] = (colour, colour, colour) # Save image images.append(_snake_case ) lowerCAmelCase : Union[str, Any] = new_generation(_snake_case ) return images if __name__ == "__main__": snake_case__ : Union[str, Any] = generate_images(GLIDER, 16) images[0].save('''out.gif''', save_all=True, append_images=images[1:])
637
1
from __future__ import annotations from fractions import Fraction from math import gcd, sqrt def A__ ( __A : Optional[int] ) ->bool: __A =int(number**0.5 ) return number == sq * sq def A__ ( __A : Dict , __A : Union[str, Any] , __A : str , __A : Union[str, Any] , __A : List[str] , __A : str ) ->tuple[int, int]: __A =x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den __A =x_den * y_den * z_den __A =gcd(__lowerCAmelCase , __lowerCAmelCase ) top //= hcf bottom //= hcf return top, bottom def A__ ( __A : Optional[Any] = 35 ) ->int: __A =set() __A =42 __A =Fraction(0 ) __A =42 for x_num in range(1 , order + 1 ): for x_den in range(x_num + 1 , order + 1 ): for y_num in range(1 , order + 1 ): for y_den in range(y_num + 1 , order + 1 ): # n=1 __A =x_num * y_den + x_den * y_num __A =x_den * y_den __A =gcd(__lowerCAmelCase , __lowerCAmelCase ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: __A =add_three( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) unique_s.add(__lowerCAmelCase ) # n=2 __A =( x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num ) __A =x_den * x_den * y_den * y_den if is_sq(__lowerCAmelCase ) and is_sq(__lowerCAmelCase ): __A =int(sqrt(__lowerCAmelCase ) ) __A =int(sqrt(__lowerCAmelCase ) ) __A =gcd(__lowerCAmelCase , __lowerCAmelCase ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: __A =add_three( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) unique_s.add(__lowerCAmelCase ) # n=-1 __A =x_num * y_num __A =x_den * y_num + x_num * y_den __A =gcd(__lowerCAmelCase , __lowerCAmelCase ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: __A =add_three( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) unique_s.add(__lowerCAmelCase ) # n=2 __A =x_num * x_num * y_num * y_num __A =( x_den * x_den * y_num * y_num 
+ x_num * x_num * y_den * y_den ) if is_sq(__lowerCAmelCase ) and is_sq(__lowerCAmelCase ): __A =int(sqrt(__lowerCAmelCase ) ) __A =int(sqrt(__lowerCAmelCase ) ) __A =gcd(__lowerCAmelCase , __lowerCAmelCase ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: __A =add_three( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) unique_s.add(__lowerCAmelCase ) for num, den in unique_s: total += Fraction(__lowerCAmelCase , __lowerCAmelCase ) return total.denominator + total.numerator if __name__ == "__main__": print(F"""{solution() = }""")
184
import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __lowerCAmelCase : List[Any] = logging.get_logger(__name__) __lowerCAmelCase : int = "▁" __lowerCAmelCase : int = { "vocab_file": "vocab.json", "spm_file": "sentencepiece.bpe.model", } __lowerCAmelCase : Tuple = { "vocab_file": { "facebook/s2t-small-librispeech-asr": ( "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json" ), }, "spm_file": { "facebook/s2t-small-librispeech-asr": ( "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model" ) }, } __lowerCAmelCase : List[str] = { "facebook/s2t-small-librispeech-asr": 1_024, } __lowerCAmelCase : int = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"] __lowerCAmelCase : str = {"mustc": MUSTC_LANGS} class __lowerCAmelCase ( lowerCAmelCase_ ): """simple docstring""" A__ : Any = VOCAB_FILES_NAMES A__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP A__ : Optional[int] = MAX_MODEL_INPUT_SIZES A__ : List[Any] = ['''input_ids''', '''attention_mask'''] A__ : List[int] = [] def __init__( self : List[str] , _snake_case : Optional[int] , _snake_case : Optional[Any] , _snake_case : Dict="<s>" , _snake_case : Optional[Any]="</s>" , _snake_case : str="<pad>" , _snake_case : Optional[int]="<unk>" , _snake_case : Optional[int]=False , _snake_case : Any=False , _snake_case : List[Any]=None , _snake_case : Any=None , _snake_case : Optional[Dict[str, Any]] = None , **_snake_case : Optional[Any] , ): __lowercase : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_snake_case , eos_token=_snake_case , unk_token=_snake_case , pad_token=_snake_case , do_upper_case=_snake_case , do_lower_case=_snake_case , tgt_lang=_snake_case , lang_codes=_snake_case , sp_model_kwargs=self.sp_model_kwargs , **_snake_case 
, ) __lowercase : Optional[Any] = do_upper_case __lowercase : Optional[int] = do_lower_case __lowercase : Optional[Any] = load_json(_snake_case ) __lowercase : str = {v: k for k, v in self.encoder.items()} __lowercase : Any = spm_file __lowercase : List[str] = load_spm(_snake_case , self.sp_model_kwargs ) if lang_codes is not None: __lowercase : Any = lang_codes __lowercase : Dict = LANGUAGES[lang_codes] __lowercase : Any = [F'<lang:{lang}>' for lang in self.langs] __lowercase : Optional[Any] = {lang: self.sp_model.PieceToId(F'<lang:{lang}>' ) for lang in self.langs} __lowercase : Dict = self.lang_tokens __lowercase : Optional[Any] = tgt_lang if tgt_lang is not None else self.langs[0] self.set_tgt_lang_special_tokens(self._tgt_lang ) else: __lowercase : int = {} @property def snake_case_ ( self : Optional[int] ): return len(self.encoder ) @property def snake_case_ ( self : Any ): return self._tgt_lang @tgt_lang.setter def snake_case_ ( self : int , _snake_case : str ): __lowercase : int = new_tgt_lang self.set_tgt_lang_special_tokens(_snake_case ) def snake_case_ ( self : Union[str, Any] , _snake_case : str ): __lowercase : Tuple = self.lang_code_to_id[tgt_lang] __lowercase : Optional[int] = [lang_code_id] def snake_case_ ( self : str , _snake_case : str ): return self.sp_model.encode(_snake_case , out_type=_snake_case ) def snake_case_ ( self : Any , _snake_case : List[str] ): return self.encoder.get(_snake_case , self.encoder[self.unk_token] ) def snake_case_ ( self : List[Any] , _snake_case : int ): return self.decoder.get(_snake_case , self.unk_token ) def snake_case_ ( self : str , _snake_case : List[str] ): __lowercase : Optional[Any] = [] __lowercase : Optional[Any] = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: __lowercase : Union[str, Any] = self.sp_model.decode(_snake_case ) out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " " 
__lowercase : Optional[Any] = [] else: current_sub_tokens.append(_snake_case ) __lowercase : Optional[int] = self.sp_model.decode(_snake_case ) out_string += decoded.upper() if self.do_upper_case else decoded return out_string.strip() def snake_case_ ( self : Optional[Any] , _snake_case : Union[str, Any] , _snake_case : Any=None ): if token_ids_a is None: return self.prefix_tokens + token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id] def snake_case_ ( self : int , _snake_case : List[int] , _snake_case : Optional[List[int]] = None , _snake_case : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_snake_case , token_ids_a=_snake_case , already_has_special_tokens=_snake_case ) __lowercase : Any = [1] * len(self.prefix_tokens ) __lowercase : Optional[Any] = [1] if token_ids_a is None: return prefix_ones + ([0] * len(_snake_case )) + suffix_ones return prefix_ones + ([0] * len(_snake_case )) + ([0] * len(_snake_case )) + suffix_ones def snake_case_ ( self : Any ): __lowercase : Optional[Any] = self.encoder.copy() vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Union[str, Any] ): __lowercase : Optional[int] = self.__dict__.copy() __lowercase : Dict = None return state def __setstate__( self : Optional[Any] , _snake_case : Dict ): __lowercase : int = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): __lowercase : int = {} __lowercase : List[str] = load_spm(self.spm_file , self.sp_model_kwargs ) def snake_case_ ( self : List[str] , _snake_case : str , _snake_case : Optional[str] = None ): __lowercase : Dict = Path(_snake_case ) assert save_dir.is_dir(), F'{save_directory} should be a directory' __lowercase : Tuple = save_dir / ( (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file'''] ) 
__lowercase : List[Any] = save_dir / ( (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file'''] ) save_json(self.encoder , _snake_case ) if os.path.abspath(self.spm_file ) != os.path.abspath(_snake_case ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , _snake_case ) elif not os.path.isfile(self.spm_file ): with open(_snake_case , '''wb''' ) as fi: __lowercase : Dict = self.sp_model.serialized_model_proto() fi.write(_snake_case ) return (str(_snake_case ), str(_snake_case )) def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase ) -> sentencepiece.SentencePieceProcessor: __lowercase : Union[str, Any] = sentencepiece.SentencePieceProcessor(**__lowerCAmelCase ) spm.Load(str(__lowerCAmelCase ) ) return spm def UpperCAmelCase_ ( __lowerCAmelCase ) -> Union[Dict, List]: with open(__lowerCAmelCase , '''r''' ) as f: return json.load(__lowerCAmelCase ) def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase ) -> None: with open(__lowerCAmelCase , '''w''' ) as f: json.dump(__lowerCAmelCase , __lowerCAmelCase , indent=2 )
509
0
'''simple docstring''' def __lowerCAmelCase (__lowerCAmelCase ): _UpperCAmelCase : Tuple = [0] * len(__lowerCAmelCase ) _UpperCAmelCase : Dict = [] _UpperCAmelCase : Optional[Any] = [] _UpperCAmelCase : Any = 0 for values in graph.values(): for i in values: indegree[i] += 1 for i in range(len(__lowerCAmelCase ) ): if indegree[i] == 0: queue.append(__lowerCAmelCase ) while queue: _UpperCAmelCase : List[Any] = queue.pop(0 ) cnt += 1 topo.append(__lowerCAmelCase ) for x in graph[vertex]: indegree[x] -= 1 if indegree[x] == 0: queue.append(__lowerCAmelCase ) if cnt != len(__lowerCAmelCase ): print("Cycle exists" ) else: print(__lowerCAmelCase ) # Adjacency List of Graph lowerCamelCase__ = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []} topological_sort(graph)
40
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase__ = { 'configuration_instructblip': [ 'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'InstructBlipConfig', 'InstructBlipQFormerConfig', 'InstructBlipVisionConfig', ], 'processing_instructblip': ['InstructBlipProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ 'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST', 'InstructBlipQFormerModel', 'InstructBlipPreTrainedModel', 'InstructBlipForConditionalGeneration', 'InstructBlipVisionModel', ] if TYPE_CHECKING: from .configuration_instructblip import ( INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig, ) from .processing_instructblip import InstructBlipProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_instructblip import ( INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST, InstructBlipForConditionalGeneration, InstructBlipPreTrainedModel, InstructBlipQFormerModel, InstructBlipVisionModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
40
1
"""simple docstring""" from itertools import product from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : Any ): lowerCAmelCase = k_size // 2 lowerCAmelCase ,lowerCAmelCase = mgrid[0 - center : k_size - center, 0 - center : k_size - center] lowerCAmelCase = 1 / (2 * pi * sigma) * exp(-(square(_UpperCAmelCase ) + square(_UpperCAmelCase )) / (2 * square(_UpperCAmelCase )) ) return g def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] ): lowerCAmelCase ,lowerCAmelCase = image.shape[0], image.shape[1] # dst image height and width lowerCAmelCase = height - k_size + 1 lowerCAmelCase = width - k_size + 1 # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows lowerCAmelCase = zeros((dst_height * dst_width, k_size * k_size) ) lowerCAmelCase = 0 for i, j in product(range(_UpperCAmelCase ) , range(_UpperCAmelCase ) ): lowerCAmelCase = ravel(image[i : i + k_size, j : j + k_size] ) lowerCAmelCase = window row += 1 # turn the kernel into shape(k*k, 1) lowerCAmelCase = gen_gaussian_kernel(_UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase = ravel(_UpperCAmelCase ) # reshape and get the dst image lowerCAmelCase = dot(_UpperCAmelCase , _UpperCAmelCase ).reshape(_UpperCAmelCase , _UpperCAmelCase ).astype(_UpperCAmelCase ) return dst if __name__ == "__main__": # read original image __UpperCamelCase : Tuple = imread(R'''../image_data/lena.jpg''') # turn image in gray scale value __UpperCamelCase : int = cvtColor(img, COLOR_BGR2GRAY) # get values with two different mask size __UpperCamelCase : Optional[Any] = gaussian_filter(gray, 3, sigma=1) __UpperCamelCase : Tuple = gaussian_filter(gray, 5, sigma=0.8) # show result images imshow('''gaussian filter with 3x3 mask''', gaussianaxa) imshow('''gaussian filter with 5x5 mask''', gaussianaxa) waitKey()
4
from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class UpperCAmelCase__ ( __snake_case ): __snake_case : Optional[Any] = ["image_processor", "tokenizer"] __snake_case : Tuple = "BridgeTowerImageProcessor" __snake_case : List[str] = ("RobertaTokenizer", "RobertaTokenizerFast") def __init__( self ,A__ ,A__ ): super().__init__(A__ ,A__ ) def __call__( self ,A__ ,A__ = None ,A__ = True ,A__ = False ,A__ = None ,A__ = None ,A__ = 0 ,A__ = None ,A__ = None ,A__ = None ,A__ = False ,A__ = False ,A__ = False ,A__ = False ,A__ = True ,A__ = None ,**A__ ,): _A : List[Any] = self.tokenizer( text=A__ ,add_special_tokens=A__ ,padding=A__ ,truncation=A__ ,max_length=A__ ,stride=A__ ,pad_to_multiple_of=A__ ,return_token_type_ids=A__ ,return_attention_mask=A__ ,return_overflowing_tokens=A__ ,return_special_tokens_mask=A__ ,return_offsets_mapping=A__ ,return_length=A__ ,verbose=A__ ,return_tensors=A__ ,**A__ ,) # add pixel_values + pixel_mask _A : Optional[Any] = self.image_processor( A__ ,return_tensors=A__ ,do_normalize=A__ ,do_center_crop=A__ ,**A__ ) encoding.update(A__ ) return encoding def A__ ( self ,*A__ ,**A__ ): return self.tokenizer.batch_decode(*A__ ,**A__ ) def A__ ( self ,*A__ ,**A__ ): return self.tokenizer.decode(*A__ ,**A__ ) @property def A__ ( self ): _A : Union[str, Any] = self.tokenizer.model_input_names _A : str = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
206
0
'''simple docstring''' from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time lowerCAmelCase_ : Optional[int] = Lock() def UpperCAmelCase ( A : Union[str, Any] , A : Any , A : Union[str, Any] , A : List[str] , A : Optional[int] , A : List[Any] , A : Optional[int] ): global process_lock # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to # find out we are sorted as it does to sort the list with this algorithm for i in range(0 , 10 ): if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor process_lock.acquire() r_send[1].send(A ) process_lock.release() # receive your right neighbor's value process_lock.acquire() SCREAMING_SNAKE_CASE : Any = rr_cv[0].recv() process_lock.release() # take the lower value since you are on the left SCREAMING_SNAKE_CASE : int = min(A , A ) elif (i + position) % 2 != 0 and l_send is not None: # send your value to your left neighbor process_lock.acquire() l_send[1].send(A ) process_lock.release() # receive your left neighbor's value process_lock.acquire() SCREAMING_SNAKE_CASE : Union[str, Any] = lr_cv[0].recv() process_lock.release() # take the higher value since you are on the right SCREAMING_SNAKE_CASE : str = max(A , A ) # after all swaps are performed, send the values back to main result_pipe[1].send(A ) def UpperCAmelCase ( A : Dict ): SCREAMING_SNAKE_CASE : List[Any] = [] SCREAMING_SNAKE_CASE : Union[str, Any] = [] # initialize the list of pipes where the values will be retrieved for _ in arr: result_pipe.append(Pipe() ) # creates the processes # the first and last process only have one neighbor so they are made outside # of the loop SCREAMING_SNAKE_CASE : Any = Pipe() SCREAMING_SNAKE_CASE : str = Pipe() process_array_.append( Process( target=A , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) ) SCREAMING_SNAKE_CASE : str = 
temp_rs SCREAMING_SNAKE_CASE : str = temp_rr for i in range(1 , len(A ) - 1 ): SCREAMING_SNAKE_CASE : Union[str, Any] = Pipe() SCREAMING_SNAKE_CASE : Optional[int] = Pipe() process_array_.append( Process( target=A , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) ) SCREAMING_SNAKE_CASE : str = temp_rs SCREAMING_SNAKE_CASE : Optional[int] = temp_rr process_array_.append( Process( target=A , args=( len(A ) - 1, arr[len(A ) - 1], temp_ls, None, temp_lr, None, result_pipe[len(A ) - 1], ) , ) ) # start the processes for p in process_array_: p.start() # wait for the processes to end and write their values to the list for p in range(0 , len(A ) ): SCREAMING_SNAKE_CASE : int = result_pipe[p][0].recv() process_array_[p].join() return arr def UpperCAmelCase ( ): SCREAMING_SNAKE_CASE : str = list(range(10 , 0 , -1 ) ) print('''Initial List''' ) print(*A ) SCREAMING_SNAKE_CASE : str = odd_even_transposition(A ) print('''Sorted List\n''' ) print(*A ) if __name__ == "__main__": main()
464
'''simple docstring''' import argparse import os from io import BytesIO from pathlib import Path import requests from clip_retrieval.clip_client import ClipClient from PIL import Image from tqdm import tqdm def UpperCAmelCase ( A : Union[str, Any] , A : Optional[int] , A : Any ): SCREAMING_SNAKE_CASE : List[str] = 1.5 SCREAMING_SNAKE_CASE : str = int(factor * num_class_images ) SCREAMING_SNAKE_CASE : Union[str, Any] = ClipClient( url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=A , aesthetic_weight=0.1 ) os.makedirs(F"""{class_data_dir}/images""" , exist_ok=A ) if len(list(Path(F"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images: return while True: SCREAMING_SNAKE_CASE : Union[str, Any] = client.query(text=A ) if len(A ) >= factor * num_class_images or num_images > 1e4: break else: SCREAMING_SNAKE_CASE : Optional[int] = int(factor * num_images ) SCREAMING_SNAKE_CASE : str = ClipClient( url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=A , aesthetic_weight=0.1 , ) SCREAMING_SNAKE_CASE : Optional[int] = 0 SCREAMING_SNAKE_CASE : Union[str, Any] = 0 SCREAMING_SNAKE_CASE : Tuple = tqdm(desc='''downloading real regularization images''' , total=A ) with open(F"""{class_data_dir}/caption.txt""" , '''w''' ) as fa, open(F"""{class_data_dir}/urls.txt""" , '''w''' ) as fa, open( F"""{class_data_dir}/images.txt""" , '''w''' ) as fa: while total < num_class_images: SCREAMING_SNAKE_CASE : int = class_images[count] count += 1 try: SCREAMING_SNAKE_CASE : int = requests.get(images['''url'''] ) if img.status_code == 200: SCREAMING_SNAKE_CASE : List[str] = Image.open(BytesIO(img.content ) ) with open(F"""{class_data_dir}/images/{total}.jpg""" , '''wb''' ) as f: f.write(img.content ) fa.write(images['''caption'''] + '''\n''' ) fa.write(images['''url'''] + '''\n''' ) fa.write(F"""{class_data_dir}/images/{total}.jpg""" + '''\n''' ) total += 1 pbar.update(1 ) else: continue except Exception: continue 
return def UpperCAmelCase ( ): SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser('''''' , add_help=A ) parser.add_argument('''--class_prompt''' , help='''text prompt to retrieve images''' , required=A , type=A ) parser.add_argument('''--class_data_dir''' , help='''path to save images''' , required=A , type=A ) parser.add_argument('''--num_class_images''' , help='''number of images to download''' , default=200 , type=A ) return parser.parse_args() if __name__ == "__main__": lowerCAmelCase_ : List[str] = parse_args() retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
464
1
import inspect import os import unittest from pathlib import Path import torch import accelerate from accelerate.test_utils import execute_subprocess_async from accelerate.test_utils.testing import run_command class __lowerCAmelCase ( unittest.TestCase ): lowerCamelCase_ : Tuple = inspect.getfile(accelerate.test_utils ) lowerCamelCase_ : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] ) lowerCamelCase_ : Union[str, Any] = ['''accelerate''', '''launch'''] lowerCamelCase_ : Tuple = Path.home() / '''.cache/huggingface/accelerate''' lowerCamelCase_ : Tuple = '''default_config.yaml''' lowerCamelCase_ : str = config_folder / config_file lowerCamelCase_ : List[Any] = config_folder / '''_default_config.yaml''' lowerCamelCase_ : Dict = Path('''tests/test_configs''' ) @classmethod def lowerCamelCase (cls ) -> Dict: '''simple docstring''' if cls.config_path.is_file(): cls.config_path.rename(cls.changed_path ) @classmethod def lowerCamelCase (cls ) -> Any: '''simple docstring''' if cls.changed_path.is_file(): cls.changed_path.rename(cls.config_path ) def lowerCamelCase (self ) -> Tuple: '''simple docstring''' snake_case_ : Dict = self.base_cmd if torch.cuda.is_available() and (torch.cuda.device_count() > 1): cmd += ["--multi_gpu"] execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() ) def lowerCamelCase (self ) -> Dict: '''simple docstring''' for config in sorted(self.test_config_path.glob('''**/*.yaml''' ) ): with self.subTest(config_file=__magic_name__ ): execute_subprocess_async( self.base_cmd + ['''--config_file''', str(__magic_name__ ), self.test_file_path] , env=os.environ.copy() ) def lowerCamelCase (self ) -> List[Any]: '''simple docstring''' execute_subprocess_async(['''accelerate''', '''test'''] , env=os.environ.copy() ) class __lowerCAmelCase ( unittest.TestCase ): lowerCamelCase_ : List[str] = '''test-tpu''' lowerCamelCase_ : Dict = '''us-central1-a''' lowerCamelCase_ : Any = '''ls''' 
lowerCamelCase_ : Dict = ['''accelerate''', '''tpu-config'''] lowerCamelCase_ : Tuple = '''cd /usr/share''' lowerCamelCase_ : List[Any] = '''tests/test_samples/test_command_file.sh''' lowerCamelCase_ : List[Any] = '''Running gcloud compute tpus tpu-vm ssh''' def lowerCamelCase (self ) -> Dict: '''simple docstring''' snake_case_ : int = run_command( self.cmd + ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=__magic_name__ , ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __magic_name__ , ) def lowerCamelCase (self ) -> Dict: '''simple docstring''' snake_case_ : Optional[int] = run_command( self.cmd + [ '''--config_file''', '''tests/test_configs/0_12_0.yaml''', '''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug''', ] , return_stdout=__magic_name__ , ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __magic_name__ , ) def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' snake_case_ : List[str] = run_command( self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=__magic_name__ ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , ) def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' snake_case_ : List[Any] = run_command( self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=__magic_name__ , ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __magic_name__ , ) def lowerCamelCase (self ) -> int: '''simple docstring''' snake_case_ : Any = run_command( self.cmd 
+ [ '''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--command''', '''echo "Hello World"''', '''--debug''', ] , return_stdout=__magic_name__ , ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all''' , __magic_name__ , ) def lowerCamelCase (self ) -> Dict: '''simple docstring''' snake_case_ : str = run_command( self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=__magic_name__ , ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , ) def lowerCamelCase (self ) -> int: '''simple docstring''' snake_case_ : Tuple = run_command( self.cmd + [ '''--config_file''', '''tests/test_configs/0_12_0.yaml''', '''--command_file''', self.command_file, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug''', ] , return_stdout=__magic_name__ , ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , ) def lowerCamelCase (self ) -> Optional[int]: '''simple docstring''' snake_case_ : Any = run_command( self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=__magic_name__ , ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , ) def lowerCamelCase (self ) -> str: '''simple docstring''' snake_case_ : Optional[Any] = run_command( self.cmd + [ '''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--accelerate_version''', '''12.0.0''', 
'''--debug''', ] , return_stdout=__magic_name__ , ) self.assertIn( F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )
60
"""simple docstring""" import json import os import shutil import tempfile import unittest from transformers import BatchEncoding, CanineTokenizer from transformers.testing_utils import require_tokenizers, require_torch from transformers.tokenization_utils import AddedToken from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin class lowercase ( _UpperCAmelCase , unittest.TestCase ): _SCREAMING_SNAKE_CASE = CanineTokenizer _SCREAMING_SNAKE_CASE = False def _snake_case ( self ) -> Any: super().setUp() lowerCAmelCase = CanineTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _snake_case ( self ) -> Union[str, Any]: return CanineTokenizer.from_pretrained("""google/canine-s""" ) def _snake_case ( self , **lowercase ) -> CanineTokenizer: lowerCAmelCase = self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase ) lowerCAmelCase = 1_024 return tokenizer @require_torch def _snake_case ( self ) -> str: lowerCAmelCase = self.canine_tokenizer lowerCAmelCase = ["""Life is like a box of chocolates.""", """You never know what you're gonna get."""] # fmt: off lowerCAmelCase = [57_344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57_345, 0, 0, 0, 0] # fmt: on lowerCAmelCase = tokenizer(lowercase , padding=lowercase , return_tensors="""pt""" ) self.assertIsInstance(lowercase , lowercase ) lowerCAmelCase = list(batch.input_ids.numpy()[0] ) self.assertListEqual(lowercase , lowercase ) self.assertEqual((2, 39) , batch.input_ids.shape ) self.assertEqual((2, 39) , batch.attention_mask.shape ) @require_torch def _snake_case ( self ) -> Optional[Any]: lowerCAmelCase = self.canine_tokenizer lowerCAmelCase = ["""Once there was a man.""", """He wrote a test in HuggingFace Tranformers."""] lowerCAmelCase = tokenizer(lowercase , padding=lowercase , return_tensors="""pt""" ) # check if input_ids, 
attention_mask and token_type_ids are returned self.assertIn("""input_ids""" , lowercase ) self.assertIn("""attention_mask""" , lowercase ) self.assertIn("""token_type_ids""" , lowercase ) @require_torch def _snake_case ( self ) -> Optional[int]: lowerCAmelCase = self.canine_tokenizer lowerCAmelCase = [ """What's the weater?""", """It's about 25 degrees.""", ] lowerCAmelCase = tokenizer( text_target=lowercase , max_length=32 , padding="""max_length""" , truncation=lowercase , return_tensors="""pt""" ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) def _snake_case ( self ) -> Tuple: # safety check on max_len default value so we are sure the test works lowerCAmelCase = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test lowerCAmelCase = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}' ): # Isolate this from the other tests because we save additional tokens/etc lowerCAmelCase = tempfile.mkdtemp() lowerCAmelCase = """ He is very happy, UNwant\u00E9d,running""" lowerCAmelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase ) tokenizer.save_pretrained(lowercase ) lowerCAmelCase = tokenizer.__class__.from_pretrained(lowercase ) lowerCAmelCase = after_tokenizer.encode(lowercase , add_special_tokens=lowercase ) self.assertListEqual(lowercase , lowercase ) shutil.rmtree(lowercase ) lowerCAmelCase = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}' ): # Isolate this from the other tests because we save additional tokens/etc lowerCAmelCase = tempfile.mkdtemp() lowerCAmelCase = """ He is very happy, UNwant\u00E9d,running""" lowerCAmelCase = tokenizer.additional_special_tokens # We can add a new special token for Canine as follows: lowerCAmelCase = chr(0XE_007 ) 
                # --- tail of a method whose `def` line lies before this chunk ---
                # NOTE(review): throughout this file the original descriptive variable
                # names were mangled to `lowerCAmelCase`/`lowercase`, so assignments no
                # longer bind the names later statements read (e.g. `tokenizers`,
                # `after_tokenizer`, `special_token`, `ids`). Restore the real names
                # before these tests can run; code below is left byte-identical.
                additional_special_tokens.append(lowercase )
                tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
                lowerCAmelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
                tokenizer.save_pretrained(lowercase )
                # round-trip: reload the saved tokenizer and re-encode the same text
                lowerCAmelCase = tokenizer.__class__.from_pretrained(lowercase )
                lowerCAmelCase = after_tokenizer.encode(lowercase , add_special_tokens=lowercase )
                self.assertListEqual(lowercase , lowercase )
                self.assertIn(lowercase , after_tokenizer.additional_special_tokens )
                # the saved value (42) must persist, but an explicit from_pretrained
                # override (43) must win
                self.assertEqual(after_tokenizer.model_max_length , 42 )
                lowerCAmelCase = tokenizer.__class__.from_pretrained(lowercase , model_max_length=43 )
                self.assertEqual(tokenizer.model_max_length , 43 )
                shutil.rmtree(lowercase )

    def _snake_case ( self ) -> Tuple:
        # Setting a new cls_token: it must encode to exactly one id, decode back
        # to itself, and be stripped when skip_special_tokens is used.
        lowerCAmelCase = self.get_tokenizers(do_lower_case=lowercase )
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}' ):
                lowerCAmelCase , lowerCAmelCase = self.get_clean_sequence(lowercase )
                # a special token for Canine can be defined as follows:
                lowerCAmelCase = 0XE_005
                lowerCAmelCase = chr(lowercase )
                tokenizer.add_special_tokens({"""cls_token""": special_token} )
                lowerCAmelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
                self.assertEqual(len(lowercase ) , 1 )
                lowerCAmelCase = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=lowercase )
                lowerCAmelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
                lowerCAmelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
                lowerCAmelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
                self.assertEqual(lowercase , input_encoded + special_token_id )
                lowerCAmelCase = tokenizer.decode(lowercase , skip_special_tokens=lowercase )
                self.assertTrue(special_token not in decoded )

    def _snake_case ( self ) -> Union[str, Any]:
        # Tokens added via add_tokens(special_tokens=...) and via
        # add_special_tokens({"additional_special_tokens": ...}) must both
        # tokenize to a single token equal to themselves.
        lowerCAmelCase = self.get_tokenizers(do_lower_case=lowercase )
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}' ):
                lowerCAmelCase = chr(0XE_005 )
                lowerCAmelCase = chr(0XE_006 )
                # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
                tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=lowercase )
                # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
                # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
                tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} )
                lowerCAmelCase = tokenizer.tokenize(lowercase )
                lowerCAmelCase = tokenizer.tokenize(lowercase )
                self.assertEqual(len(lowercase ) , 1 )
                self.assertEqual(len(lowercase ) , 1 )
                self.assertEqual(token_a[0] , lowercase )
                self.assertEqual(token_a[0] , lowercase )

    @require_tokenizers
    def _snake_case ( self ) -> Union[str, Any]:
        # An AddedToken registered as a special token must survive a
        # save_pretrained / from_pretrained round trip.
        lowerCAmelCase = self.get_tokenizers(do_lower_case=lowercase )
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}' ):
                # a special token for Canine can be defined as follows:
                lowerCAmelCase = 0XE_006
                lowerCAmelCase = chr(lowercase )
                lowerCAmelCase = AddedToken(lowercase , lstrip=lowercase )
                tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} )
                with tempfile.TemporaryDirectory() as tmp_dir_name:
                    tokenizer.save_pretrained(lowercase )
                    tokenizer.from_pretrained(lowercase )

    def _snake_case ( self ) -> Dict:
        # Editing special_tokens_map.json / tokenizer_config.json on disk must be
        # honored by from_pretrained, and explicit kwargs must override the files.
        lowerCAmelCase = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(lowercase )
                with open(os.path.join(lowercase , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
                    lowerCAmelCase = json.load(lowercase )
                with open(os.path.join(lowercase , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
                    lowerCAmelCase = json.load(lowercase )
                # a special token for Canine can be defined as follows:
                lowerCAmelCase = 0XE_006
                lowerCAmelCase = chr(lowercase )
                lowerCAmelCase = [new_token_a]
                lowerCAmelCase = [new_token_a]
                with open(os.path.join(lowercase , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
                    json.dump(lowercase , lowercase )
                with open(os.path.join(lowercase , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
                    json.dump(lowercase , lowercase )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                lowerCAmelCase = tokenizer_class.from_pretrained(lowercase , extra_ids=0 )
                self.assertIn(lowercase , tokenizer_without_change_in_init.additional_special_tokens )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_a] ,
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
                lowerCAmelCase = 0XE_007
                lowerCAmelCase = chr(lowercase )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                lowerCAmelCase = [AddedToken(lowercase , lstrip=lowercase )]
                lowerCAmelCase = tokenizer_class.from_pretrained(
                    lowercase , additional_special_tokens=lowercase , extra_ids=0 )
                self.assertIn(lowercase , tokenizer.additional_special_tokens )
                # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )

    @require_tokenizers
    def _snake_case ( self ) -> Tuple:
        # decode(spaces_between_special_tokens=...) must round-trip "hello world",
        # with or without surrounding [CLS]/[SEP].
        lowerCAmelCase = self.get_tokenizers(do_lower_case=lowercase )
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}' ):
                lowerCAmelCase = """hello world"""
                if self.space_between_special_tokens:
                    lowerCAmelCase = """[CLS] hello world [SEP]"""
                else:
                    lowerCAmelCase = input
                lowerCAmelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
                lowerCAmelCase = tokenizer.decode(lowercase , spaces_between_special_tokens=self.space_between_special_tokens )
                self.assertIn(lowercase , [output, output.lower()] )

    def _snake_case ( self ) -> List[str]:
        # Each *_token attribute must be consistent with its *_token_id twin when
        # set via the id, including additional_special_tokens(_ids).
        lowerCAmelCase = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}' ):
                lowerCAmelCase = [
                    """bos_token""",
                    """eos_token""",
                    """unk_token""",
                    """sep_token""",
                    """pad_token""",
                    """cls_token""",
                    """mask_token""",
                ]
                lowerCAmelCase = """a"""
                lowerCAmelCase = ord(lowercase )
                for attr in attributes_list:
                    setattr(lowercase , attr + """_id""" , lowercase )
                    self.assertEqual(getattr(lowercase , lowercase ) , lowercase )
                    self.assertEqual(getattr(lowercase , attr + """_id""" ) , lowercase )
                    setattr(lowercase , attr + """_id""" , lowercase )
                    self.assertEqual(getattr(lowercase , lowercase ) , lowercase )
                    self.assertEqual(getattr(lowercase , attr + """_id""" ) , lowercase )
                setattr(lowercase , """additional_special_tokens_ids""" , [] )
                self.assertListEqual(getattr(lowercase , """additional_special_tokens""" ) , [] )
                self.assertListEqual(getattr(lowercase , """additional_special_tokens_ids""" ) , [] )
                lowerCAmelCase = 0XE_006
                lowerCAmelCase = chr(lowercase )
                setattr(lowercase , """additional_special_tokens_ids""" , [additional_special_token_id] )
                self.assertListEqual(getattr(lowercase , """additional_special_tokens""" ) , [additional_special_token] )
                self.assertListEqual(getattr(lowercase , """additional_special_tokens_ids""" ) , [additional_special_token_id] )

    # The remaining shared-tester hooks are intentional no-ops for this
    # character-level tokenizer (no vocab file to exercise); original method
    # names were lost in the mangling.
    def _snake_case ( self ) -> Tuple:
        pass

    def _snake_case ( self ) -> Optional[Any]:
        pass

    def _snake_case ( self ) -> Dict:
        pass

    def _snake_case ( self ) -> str:
        pass

    def _snake_case ( self ) -> Optional[int]:
        pass

    def _snake_case ( self ) -> Optional[int]:
        pass

    def _snake_case ( self ) -> Union[str, Any]:
        pass

    def _snake_case ( self ) -> Optional[Any]:
        pass
532
0
'''simple docstring'''

from typing import List

from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowercase_ = logging.get_logger(__name__)

lowercase_ = {
    """snap-research/efficientformer-l1-300""": (
        """https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
    ),
}


class __UpperCamelCase(PretrainedConfig):
    """Configuration for an EfficientFormer model.

    Fixes applied to the mangled original:
    - every ``__init__`` parameter was named ``_A`` (a duplicate-argument
      SyntaxError) while the body read names absent from the signature; the
      parameter names are restored from the body's reads, keeping the original
      positional order and default values;
    - the base class name was mangled to an undefined ``lowerCAmelCase__``; it
      is restored to ``PretrainedConfig``, imported above;
    - assignments bound throwaway locals instead of instance attributes; they
      now set ``self.<name>`` so the config actually stores its values.

    NOTE(review): the class attribute is kept as ``lowerCAmelCase_``; upstream
    names it ``model_type`` — confirm before relying on PretrainedConfig
    machinery that reads ``model_type``.
    """

    lowerCAmelCase_ = '''efficientformer'''

    def __init__(
        self,
        # NOTE: list defaults are treated as read-only configuration templates
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_metaad_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ):
        """Store all hyper-parameters on the instance; extra kwargs go to the base class."""
        super().__init__(**kwargs)
        # assignment order mirrors the original body
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        # NOTE(review): mangled spelling of upstream `num_meta3d_blocks`; kept to
        # match this file's internal naming
        self.num_metaad_blocks = num_metaad_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
716
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor


class MobileViTImageProcessingTester(unittest.TestCase):
    """Holds the knobs for building a MobileViT image-processor config in tests.

    Fixes applied to the mangled original: the class name is restored (the test
    class below instantiates ``MobileViTImageProcessingTester(self)``), the
    duplicate ``_A`` parameter names (a SyntaxError) are restored from the
    body's reads, and assignments now bind ``self.<name>`` instead of a
    throwaway local.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to construct a MobileViTImageProcessor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }


@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for MobileViTImageProcessor over PIL, numpy and torch inputs.

    Fixes applied to the mangled original: the second class also shadowed the
    first (both were ``__UpperCamelCase``), every method shared the name
    ``UpperCAmelCase__`` (so later defs silently overwrote earlier ones), the
    mixin base was the undefined ``lowerCAmelCase__`` (restored to
    ``ImageProcessingSavingTestMixin``, imported above), and assignments bound
    throwaway names later read as ``self.image_processor_tester`` etc.
    NOTE(review): restored method names follow the standard image-processor
    test naming — confirm against the upstream test file.
    """

    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        # the processor must expose all configuration attributes
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        # from_dict must honor both the dict values and explicit overrides
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        # intentional no-op in the original; name lost in mangling — confirm upstream
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images (equal_resolution=False is the upstream
        # convention — the mangled original hid the actual argument)
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors (numpify grounded by the isinstance check)
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors (torchify grounded by the isinstance check)
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
131
0
from typing import Dict, List, Optional, Tuple, Union

import torch

from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


# NOTE(review): identifier mangling renamed locals to `_UpperCAmelCase` and all
# parameters to `lowerCamelCase`, so assignments no longer bind the names later
# statements read (`self.labels`, `latent_model_input`, `timesteps`, ...), and
# duplicate parameter names make the signatures invalid. Code is left
# byte-identical; restore the original names before running.
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
    '''Class-conditional image-generation pipeline around a transformer denoiser,
    a VAE decoder and a Karras-style scheduler (DiT-like).'''

    def __init__( self : Union[str, Any] , lowerCamelCase : TransformeraDModel , lowerCamelCase : AutoencoderKL , lowerCamelCase : KarrasDiffusionSchedulers , lowerCamelCase : Optional[Dict[int, str]] = None , ) -> List[str]:
        '''Register the transformer/vae/scheduler modules and build a
        label-name -> class-id map from the optional id->label dict.'''
        super().__init__()
        self.register_modules(transformer=lowerCamelCase , vae=lowerCamelCase , scheduler=lowerCamelCase )
        # create a imagenet -> id dictionary for easier use
        _UpperCAmelCase = {}
        if idalabel is not None:
            for key, value in idalabel.items():
                # each value may hold several comma-separated synonyms
                for label in value.split(""",""" ):
                    _UpperCAmelCase = int(lowerCamelCase )
            _UpperCAmelCase = dict(sorted(self.labels.items() ) )

    def lowerCamelCase ( self : List[str] , lowerCamelCase : Union[str, List[str]] ) -> List[int]:
        '''Map a label name (or list of names) to class ids; raises ValueError
        for unknown labels, listing the valid ones.'''
        if not isinstance(lowerCamelCase , lowerCamelCase ):
            _UpperCAmelCase = list(lowerCamelCase )
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""" )
        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__( self : Tuple , lowerCamelCase : List[int] , lowerCamelCase : float = 4.0 , lowerCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase : int = 50 , lowerCamelCase : Optional[str] = "pil" , lowerCamelCase : bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
        '''Run the denoising loop for the given class ids.

        Uses classifier-free guidance when guidance_scale > 1 (batch is doubled
        with a null class of id 1000), then decodes latents through the VAE.
        Returns an ImagePipelineOutput, or a (samples,) tuple when
        return_dict is falsy.'''
        _UpperCAmelCase = len(lowerCamelCase )
        _UpperCAmelCase = self.transformer.config.sample_size
        _UpperCAmelCase = self.transformer.config.in_channels
        _UpperCAmelCase = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowerCamelCase , device=self.device , dtype=self.transformer.dtype , )
        # duplicate latents for the conditional/unconditional halves
        _UpperCAmelCase = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
        _UpperCAmelCase = torch.tensor(lowerCamelCase , device=self.device ).reshape(-1 )
        # 1000 is the null-class id used for the unconditional half
        _UpperCAmelCase = torch.tensor([1000] * batch_size , device=self.device )
        _UpperCAmelCase = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
        # set step values
        self.scheduler.set_timesteps(lowerCamelCase )
        for t in self.progress_bar(self.scheduler.timesteps ):
            if guidance_scale > 1:
                # keep the two halves identical before each denoising step
                _UpperCAmelCase = latent_model_input[: len(lowerCamelCase ) // 2]
                _UpperCAmelCase = torch.cat([half, half] , dim=0 )
            _UpperCAmelCase = self.scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
            _UpperCAmelCase = t
            if not torch.is_tensor(lowerCamelCase ):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                _UpperCAmelCase = latent_model_input.device.type == """mps"""
                if isinstance(lowerCamelCase , lowerCamelCase ):
                    _UpperCAmelCase = torch.floataa if is_mps else torch.floataa
                else:
                    _UpperCAmelCase = torch.intaa if is_mps else torch.intaa
                _UpperCAmelCase = torch.tensor([timesteps] , dtype=lowerCamelCase , device=latent_model_input.device )
            elif len(timesteps.shape ) == 0:
                _UpperCAmelCase = timesteps[None].to(latent_model_input.device )
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            _UpperCAmelCase = timesteps.expand(latent_model_input.shape[0] )
            # predict noise model_output
            _UpperCAmelCase = self.transformer(
                lowerCamelCase , timestep=lowerCamelCase , class_labels=lowerCamelCase ).sample
            # perform guidance
            if guidance_scale > 1:
                _UpperCAmelCase , _UpperCAmelCase = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                _UpperCAmelCase , _UpperCAmelCase = torch.split(lowerCamelCase , len(lowerCamelCase ) // 2 , dim=0 )
                _UpperCAmelCase = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                _UpperCAmelCase = torch.cat([half_eps, half_eps] , dim=0 )
                _UpperCAmelCase = torch.cat([eps, rest] , dim=1 )
            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                _UpperCAmelCase , _UpperCAmelCase = torch.split(lowerCamelCase , lowerCamelCase , dim=1 )
            else:
                _UpperCAmelCase = noise_pred
            # compute previous image: x_t -> x_t-1
            _UpperCAmelCase = self.scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase ).prev_sample
        if guidance_scale > 1:
            _UpperCAmelCase , _UpperCAmelCase = latent_model_input.chunk(2 , dim=0 )
        else:
            _UpperCAmelCase = latent_model_input
        _UpperCAmelCase = 1 / self.vae.config.scaling_factor * latents
        _UpperCAmelCase = self.vae.decode(lowerCamelCase ).sample
        # map decoded samples from [-1, 1] to [0, 1]
        _UpperCAmelCase = (samples / 2 + 0.5).clamp(0 , 1 )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        _UpperCAmelCase = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            _UpperCAmelCase = self.numpy_to_pil(lowerCamelCase )
        if not return_dict:
            return (samples,)
        return ImagePipelineOutput(images=lowerCamelCase )
108
from math import factorial


def SCREAMING_SNAKE_CASE_(snake_case__=1_0_0) -> int:
    """Return the sum of the decimal digits of snake_case__! (Project Euler 20).

    Args:
        snake_case__: non-negative integer whose factorial's digits are summed
            (defaults to 100, the Euler problem input).

    Returns:
        The digit sum of snake_case__! as an int.
    """
    # BUG FIX: the original summed int(snake_case__) once per digit character,
    # yielding n * len(str(n!)) instead of the digit sum.
    return sum(int(digit) for digit in str(factorial(snake_case__)))


if __name__ == "__main__":
    # BUG FIX: the original called the undefined name `solution`.
    print(SCREAMING_SNAKE_CASE_(int(input('''Enter the Number: ''').strip())))
312
0
'''simple docstring'''

import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_big_bird import BigBirdTokenizer
else:
    lowerCAmelCase_ : int = None

lowerCAmelCase_ : List[Any] = logging.get_logger(__name__)
# NOTE(review): the mangled constant names below all collide (`lowerCAmelCase_`),
# so each assignment shadows the previous; the references inside the class
# (VOCAB_FILES_NAMES, logger, ...) point at the original, un-mangled names.
lowerCAmelCase_ : Optional[int] = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase_ : Tuple = {
    """vocab_file""": {
        """google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
        """google/bigbird-roberta-large""": (
            """https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
        ),
        """google/bigbird-base-trivia-itc""": (
            """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
        ),
    },
    """tokenizer_file""": {
        """google/bigbird-roberta-base""": (
            """https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"""
        ),
        """google/bigbird-roberta-large""": (
            """https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"""
        ),
        """google/bigbird-base-trivia-itc""": (
            """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"""
        ),
    },
}
lowerCAmelCase_ : List[Any] = {
    """google/bigbird-roberta-base""": 4096,
    """google/bigbird-roberta-large""": 4096,
    """google/bigbird-base-trivia-itc""": 4096,
}
lowerCAmelCase_ : Tuple = """▁"""


# NOTE(review): the base class is mangled to the class's own (not yet defined)
# name; per the import above it should be PreTrainedTokenizerFast. Methods all
# share the mangled name `snake_case__` and parameters share `lowercase__`, so
# later defs/params overwrite earlier ones — restore names before running.
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
    '''Fast (tokenizers-backed) BigBird tokenizer with BERT-style
    [CLS] ... [SEP] special-token handling.'''

    UpperCAmelCase__ = VOCAB_FILES_NAMES
    UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
    UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCAmelCase__ = BigBirdTokenizer
    UpperCAmelCase__ = ['''input_ids''', '''attention_mask''']
    UpperCAmelCase__ = []

    def __init__( self : List[Any] , lowercase__ : Union[str, Any]=None , lowercase__ : Dict=None , lowercase__ : Any="<unk>" , lowercase__ : Tuple="<s>" , lowercase__ : Union[str, Any]="</s>" , lowercase__ : Tuple="<pad>" , lowercase__ : int="[SEP]" , lowercase__ : Union[str, Any]="[MASK]" , lowercase__ : Optional[Any]="[CLS]" , **lowercase__ : Any , ) ->Union[str, Any]:
        '''Wrap plain-string special tokens in AddedToken and delegate to the
        fast-tokenizer base; records whether a slow vocab file is available.'''
        _UpperCamelCase : Union[str, Any] = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else bos_token
        _UpperCamelCase : int = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else eos_token
        _UpperCamelCase : Dict = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else unk_token
        _UpperCamelCase : Dict = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else pad_token
        _UpperCamelCase : List[str] = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else cls_token
        _UpperCamelCase : Optional[int] = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        _UpperCamelCase : Union[str, Any] = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else mask_token
        super().__init__(
            lowercase__ , tokenizer_file=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , unk_token=lowercase__ , sep_token=lowercase__ , pad_token=lowercase__ , cls_token=lowercase__ , mask_token=lowercase__ , **lowercase__ , )
        _UpperCamelCase : Optional[Any] = vocab_file
        _UpperCamelCase : Optional[int] = False if not self.vocab_file else True

    def snake_case__ ( self : Optional[int] , lowercase__ : List[int] , lowercase__ : Optional[List[int]] = None ) ->List[int]:
        '''Build model input: [CLS] A [SEP] or [CLS] A [SEP] B [SEP].'''
        _UpperCamelCase : List[str] = [self.sep_token_id]
        _UpperCamelCase : int = [self.cls_token_id]
        if token_ids_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a + sep

    def snake_case__ ( self : List[Any] , lowercase__ : List[int] , lowercase__ : Optional[List[int]] = None , lowercase__ : bool = False ) ->List[int]:
        '''Return a mask with 1 at special-token positions, 0 elsewhere.'''
        if already_has_special_tokens:
            if token_ids_a is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model." )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
        if token_ids_a is None:
            return [1] + ([0] * len(lowercase__ )) + [1]
        return [1] + ([0] * len(lowercase__ )) + [1] + ([0] * len(lowercase__ )) + [1]

    def snake_case__ ( self : Any , lowercase__ : List[int] , lowercase__ : Optional[List[int]] = None ) ->List[int]:
        '''Token-type ids: 0s for [CLS] A [SEP], 1s for the B [SEP] segment.'''
        _UpperCamelCase : Optional[Any] = [self.sep_token_id]
        _UpperCamelCase : Tuple = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def snake_case__ ( self : List[str] , lowercase__ : str , lowercase__ : Optional[str] = None ) ->Tuple[str]:
        '''Copy the sentencepiece vocab file into save_directory; raises if no
        slow-tokenizer vocab is available.'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(lowercase__ ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        _UpperCamelCase : Optional[int] = os.path.join(
            lowercase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        # avoid copying a file onto itself
        if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ):
            copyfile(self.vocab_file , lowercase__ )
        return (out_vocab_file,)
204
'''simple docstring'''

from collections import OrderedDict
from typing import Any, List, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging


lowerCAmelCase_ : int = logging.get_logger(__name__)

lowerCAmelCase_ : Any = {
    """EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}


# NOTE(review): identifier mangling gives every parameter the same name
# (`lowercase__`, a duplicate-argument SyntaxError), assigns to throwaway
# locals (`_UpperCamelCase`) instead of `self.*`, and sets the base class to
# the class's own (undefined) name — per the import above it should be
# PretrainedConfig / OnnxConfigWithPast. Code left byte-identical.
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
    '''GPT-J model configuration (defaults match EleutherAI/gpt-j-6B).'''

    UpperCAmelCase__ = '''gptj'''
    UpperCAmelCase__ = {
        '''max_position_embeddings''': '''n_positions''',
        '''hidden_size''': '''n_embd''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }

    def __init__( self : Optional[Any] , lowercase__ : Union[str, Any]=50_400 , lowercase__ : Union[str, Any]=2_048 , lowercase__ : Tuple=4_096 , lowercase__ : List[str]=28 , lowercase__ : Optional[int]=16 , lowercase__ : str=64 , lowercase__ : Any=None , lowercase__ : Any="gelu_new" , lowercase__ : Union[str, Any]=0.0 , lowercase__ : Optional[Any]=0.0 , lowercase__ : Any=0.0 , lowercase__ : Tuple=1e-5 , lowercase__ : Any=0.0_2 , lowercase__ : int=True , lowercase__ : int=50_256 , lowercase__ : Any=50_256 , lowercase__ : Tuple=False , **lowercase__ : str , ) ->Optional[Any]:
        '''Store the hyper-parameters and forward bos/eos/tie flags to the base.'''
        _UpperCamelCase : Dict = vocab_size
        _UpperCamelCase : List[str] = n_positions
        _UpperCamelCase : Union[str, Any] = n_embd
        _UpperCamelCase : Union[str, Any] = n_layer
        _UpperCamelCase : Optional[Any] = n_head
        _UpperCamelCase : Dict = n_inner
        _UpperCamelCase : Optional[Any] = rotary_dim
        _UpperCamelCase : Tuple = activation_function
        _UpperCamelCase : List[Any] = resid_pdrop
        _UpperCamelCase : Any = embd_pdrop
        _UpperCamelCase : Optional[Any] = attn_pdrop
        _UpperCamelCase : Optional[Any] = layer_norm_epsilon
        _UpperCamelCase : Union[str, Any] = initializer_range
        _UpperCamelCase : Optional[int] = use_cache
        _UpperCamelCase : str = bos_token_id
        _UpperCamelCase : Any = eos_token_id
        super().__init__(
            bos_token_id=lowercase__ , eos_token_id=lowercase__ , tie_word_embeddings=lowercase__ , **lowercase__ )


class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
    '''ONNX export configuration for GPT-J, with optional past-key-values.'''

    def __init__( self : Optional[int] , lowercase__ : PretrainedConfig , lowercase__ : str = "default" , lowercase__ : List[PatchingSpec] = None , lowercase__ : bool = False , ) ->Union[str, Any]:
        '''Delegate to the ONNX base config; default pad_token_id to 0 when the
        wrapped model config has none.'''
        super().__init__(lowercase__ , task=lowercase__ , patching_specs=lowercase__ , use_past=lowercase__ )
        if not getattr(self._config , "pad_token_id" , lowercase__ ):
            # TODO: how to do that better?
            _UpperCamelCase : Optional[int] = 0

    @property
    def snake_case__ ( self : List[str] ) ->Mapping[str, Mapping[int, str]]:
        '''Dynamic-axis spec for the ONNX inputs; attention_mask spans
        past_sequence + sequence when use_past is enabled.'''
        _UpperCamelCase : List[str] = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
        if self.use_past:
            self.fill_with_past_key_values_(lowercase__ , direction="inputs" )
            _UpperCamelCase : str = {0: "batch", 1: "past_sequence + sequence"}
        else:
            _UpperCamelCase : Optional[Any] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def snake_case__ ( self : int ) ->int:
        # number of transformer layers, from the wrapped config
        return self._config.n_layer

    @property
    def snake_case__ ( self : Dict ) ->int:
        # number of attention heads, from the wrapped config
        return self._config.n_head

    def snake_case__ ( self : int , lowercase__ : PreTrainedTokenizer , lowercase__ : int = -1 , lowercase__ : int = -1 , lowercase__ : bool = False , lowercase__ : Optional[TensorType] = None , ) ->Mapping[str, Any]:
        '''Build dummy ONNX-export inputs, appending zero-filled past_key_values
        (and a widened attention_mask) when use_past is enabled.'''
        _UpperCamelCase : int = super(lowercase__ , self ).generate_dummy_inputs(
            lowercase__ , batch_size=lowercase__ , seq_length=lowercase__ , is_pair=lowercase__ , framework=lowercase__ )
        # We need to order the input in the way they appears in the forward()
        _UpperCamelCase : Tuple = OrderedDict({"input_ids": common_inputs["input_ids"]} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch

                _UpperCamelCase , _UpperCamelCase : Optional[Any] = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                _UpperCamelCase : Optional[int] = seqlen + 2
                _UpperCamelCase : str = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                _UpperCamelCase : Dict = [
                    (torch.zeros(lowercase__ ), torch.zeros(lowercase__ )) for _ in range(self.num_layers )
                ]
        _UpperCamelCase : str = common_inputs["attention_mask"]
        if self.use_past:
            _UpperCamelCase : int = ordered_inputs["attention_mask"].dtype
            # extend the mask to cover the past positions
            _UpperCamelCase : Optional[int] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(lowercase__ , lowercase__ , dtype=lowercase__ )] , dim=1 )
        return ordered_inputs

    @property
    def snake_case__ ( self : Tuple ) ->int:
        # default ONNX opset version for this export
        return 13
204
1
'''simple docstring'''

from io import BytesIO
from typing import List, Union

import requests

from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_decord_available():
    import numpy as np

    from decord import VideoReader

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

lowercase : Tuple = logging.get_logger(__name__)


# NOTE(review): `__snake_case` is undefined — per the imports it should be
# PIPELINE_INIT_ARGS in the decorator and Pipeline as the base class. Locals
# are mangled to `A` and parameters to `SCREAMING_SNAKE_CASE` (duplicates in
# several signatures), so assignments no longer bind the names later read
# (`preprocess_params`, `videoreader`, `model_inputs`, ...). Left byte-identical.
@add_end_docstrings(__snake_case )
class A ( __snake_case ):
    '''Video-classification pipeline: samples frames from a video (path or URL)
    with decord and classifies them with a video model.'''

    def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Any:
        '''Require the decord backend and restrict to video-classification models.'''
        super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
        requires_backends(self , '''decord''' )
        self.check_model_type(SCREAMING_SNAKE_CASE )

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None ) -> Optional[Any]:
        '''Split pipeline kwargs into preprocess (num_frames, frame_sampling_rate)
        and postprocess (top_k) parameter dicts.'''
        A : Tuple = {}
        if frame_sampling_rate is not None:
            A : Optional[int] = frame_sampling_rate
        if num_frames is not None:
            A : Optional[Any] = num_frames
        A : Optional[int] = {}
        if top_k is not None:
            A : Union[str, Any] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__( self , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict:
        '''Classify one video (or a batch); delegates to the base Pipeline.'''
        return super().__call__(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=1 ) -> int:
        '''Download the video if given as an http(s) URL, sample num_frames
        frames evenly spaced by frame_sampling_rate, and run the image
        processor to build model inputs.'''
        if num_frames is None:
            A : List[Any] = self.model.config.num_frames
        if video.startswith('''http://''' ) or video.startswith('''https://''' ):
            A : Optional[int] = BytesIO(requests.get(SCREAMING_SNAKE_CASE ).content )
        A : List[Any] = VideoReader(SCREAMING_SNAKE_CASE )
        videoreader.seek(0 )
        A : List[Any] = 0
        A : str = num_frames * frame_sampling_rate - 1
        # evenly spaced integer frame indices across the sampled span
        A : Tuple = np.linspace(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , num=SCREAMING_SNAKE_CASE , dtype=np.intaa )
        A : Optional[int] = videoreader.get_batch(SCREAMING_SNAKE_CASE ).asnumpy()
        A : Union[str, Any] = list(SCREAMING_SNAKE_CASE )
        A : Dict = self.image_processor(SCREAMING_SNAKE_CASE , return_tensors=self.framework )
        return model_inputs

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
        '''Forward pass of the wrapped model.'''
        A : Dict = self.model(**SCREAMING_SNAKE_CASE )
        return model_outputs

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=5 ) -> Any:
        '''Softmax the logits and return the top_k {"score", "label"} dicts;
        top_k is clamped to the model's label count. PyTorch only.'''
        if top_k > self.model.config.num_labels:
            A : Dict = self.model.config.num_labels
        if self.framework == "pt":
            A : Optional[Any] = model_outputs.logits.softmax(-1 )[0]
            A, A : Any = probs.topk(SCREAMING_SNAKE_CASE )
        else:
            raise ValueError(F'Unsupported framework: {self.framework}' )
        A : List[str] = scores.tolist()
        A : Dict = ids.tolist()
        return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )]
634
"""Unit tests for the slow and fast DeBERTa tokenizers."""
import json
import os
import unittest

from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class A(TokenizerTesterMixin, unittest.TestCase):
    """DeBERTa tokenizer test suite driven by `TokenizerTesterMixin`."""

    # Fix: the three class attributes were all mangled to `__magic_name__`, so
    # only the last assignment survived; these are the names the mixin reads.
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "[UNK]",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        # Fix: the mangled version never assigned `self.vocab_file` /
        # `self.merges_file`, which the `open()` calls below rely on.
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        # NOTE(review): the booleans below were mangled away in the source;
        # restored from the canonical upstream test — confirm against it.
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # Expected padded batch (padded length 45); the all-zero / padded
            # tails are built with list repetition instead of long literals.
            expected_encoding = {
                "input_ids": [
                    [1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2] + [0] * 25,
                    [1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2] + [0] * 35,
                    [1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2],
                ],
                "token_type_ids": [[0] * 45, [0] * 45, [0] * 45],
                "attention_mask": [[1] * 20 + [0] * 25, [1] * 10 + [0] * 35, [1] * 45],
            }

            expected_decoded_sequence = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
634
1
"""Fast and slow tests for `StableDiffusionDiffEditPipeline`."""
import gc
import random
import tempfile
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMInverseScheduler,
    DDIMScheduler,
    DPMSolverMultistepInverseScheduler,
    DPMSolverMultistepScheduler,
    StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,  # Fix: mangled `UNetaDConditionModel` would raise ImportError
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


# Fix: both classes in this module were mangled to the same name
# `lowerCAmelCase_`, so the fast-test class was shadowed and never discovered.
# The integration class keeps the old module-level name; this one is renamed.
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """CPU-sized smoke tests for mask generation, inversion and save/load."""

    # Fix: these five attributes were all mangled to `_lowercase` (only the
    # last assignment survived); restored to the names the mixins read.
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_params = frozenset([])
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        """Build a tiny UNet/VAE/CLIP stack for fast tests."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            # NOTE(review): booleans below were mangled away; values restored
            # from the canonical upstream test — confirm against it.
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_zero=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Inputs for the main __call__ path (mask + pre-computed latents)."""
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_mask_inputs(self, device, seed=0):
        """Inputs for `generate_mask`."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        # Fix: `np.uinta` does not exist; image conversion needs uint8.
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_inversion_inputs(self, device, seed=0):
        """Inputs for `invert`."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)

    def test_mask(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]

        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)

    def test_inversion(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)

    def test_inversion_dpm(self):
        device = "cpu"
        components = self.get_dummy_components()

        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)

        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)


@require_torch_gpu
@slow
class lowerCAmelCase_(unittest.TestCase):
    """Slow GPU integration tests against the full SD 2.1 DiffEdit pipeline."""

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
        )
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image

    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)

        # Fix: mangled `torch.floataa` -> torch.float16.
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=source_prompt,
            target_prompt=target_prompt,
            generator=generator,
        )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator
        ).latents
        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1

    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=source_prompt,
            target_prompt=target_prompt,
            generator=generator,
        )
        inv_latents = pipe.invert(
            prompt=source_prompt,
            image=self.raw_image,
            inpaint_strength=0.7,
            generator=generator,
            num_inference_steps=25,
        ).latents
        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            num_inference_steps=25,
            output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
708
import warnings from ...utils import logging from .image_processing_yolos import YolosImageProcessor __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) class lowerCAmelCase_ ( __A ): '''simple docstring''' def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ): warnings.warn( 'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use YolosImageProcessor instead.' , __UpperCAmelCase , ) super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
153
0
"""Image classification pipeline (PT and TF backends)."""
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    import tensorflow as tf

    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class snake_case_(Pipeline):
    """
    Image classification pipeline: loads an image, runs the image processor and
    model, and returns the top-k labels with scores.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        """Route the optional `top_k` kwarg to postprocessing."""
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        """Classify the image(s) given as URL(s), path(s), or PIL image(s)."""
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        """Return the `top_k` {"score", "label"} dicts, best first."""
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        # Fix: `idalabel` -> `id2label` (the real config attribute name).
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
345
import argparse import logging import pickle from collections import Counter logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO ) lowerCAmelCase__: List[str] = logging.getLogger(__name__) if __name__ == "__main__": lowerCAmelCase__: Optional[Any] = argparse.ArgumentParser( description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)" ) parser.add_argument( "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset." ) parser.add_argument( "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file." ) parser.add_argument("--vocab_size", default=3_0522, type=int) lowerCAmelCase__: Union[str, Any] = parser.parse_args() logger.info(f'''Loading data from {args.data_file}''') with open(args.data_file, "rb") as fp: lowerCAmelCase__: List[Any] = pickle.load(fp) logger.info("Counting occurrences for MLM.") lowerCAmelCase__: List[Any] = Counter() for tk_ids in data: counter.update(tk_ids) lowerCAmelCase__: Union[str, Any] = [0] * args.vocab_size for k, v in counter.items(): lowerCAmelCase__: Tuple = v logger.info(f'''Dump to {args.token_counts_dump}''') with open(args.token_counts_dump, "wb") as handle: pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
345
1
"""EnCodec model configuration."""
import math
from typing import Optional

import numpy as np

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}


class _lowercase(PretrainedConfig):
    """Configuration class for the EnCodec neural audio codec."""

    # Fix: this must be named `model_type` for AutoConfig registration; it had
    # been mangled to `_SCREAMING_SNAKE_CASE`.
    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        # Defaults to the model hidden size when not set explicitly.
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}"""
            )

        super().__init__(**kwargs)

    # Fix: the four properties below were all mangled to the same name `a`,
    # so only the last survived (and it reads `self.frame_rate`, which no
    # longer existed). Restored to their real names.
    @property
    def chunk_length(self) -> Optional[int]:
        """Chunk length in samples, or None when chunking is disabled."""
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        """Stride between chunks in samples, or None when chunking is disabled."""
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        """Encoder output frames per second (sampling rate / total hop length)."""
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        """Number of RVQ codebooks needed for the largest target bandwidth."""
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
710
"""Audio feature extractor for TVLT: log-mel spectrograms plus a patch-level audio mask."""
from math import ceil
from typing import List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class _lowercase(SequenceFeatureExtractor):
    """Turns raw mono audio into padded log-mel spectrogram batches for TVLT."""

    # Fix: mangled to `_SCREAMING_SNAKE_CASE`; the library reads `model_input_names`.
    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        # Number of frequency patches per time step.
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    # Fix: this method was mangled to `a`, but `__call__` below invokes it as
    # `self._np_extract_fbank_features`.
    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        """Compute a dB log-mel spectrogram, rescaled to roughly [-1, 1]."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        sampling_rate: Optional[int] = None,
        resample: bool = False,
        mask_audio: bool = False,
        **kwargs,
    ) -> BatchFeature:
        """Featurize raw mono audio into padded `audio_values` (+ optional `audio_mask`)."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        # Fix: mangled `np.floataa` does not exist; dtypes restored to
        # float32 (and float64 for the already-numpy check below).
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], List):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            # Fix: the mangled version dropped this write-back, so the padded
            # batch was never filled with the actual features.
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
330
0
# Lazy-import scaffolding for the HerBERT tokenizers.  The slow tokenizer is
# always exposed; the Rust-backed fast tokenizer is only registered when the
# optional `tokenizers` dependency is installed.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available

# Maps submodule name -> public names it provides; consumed by _LazyModule.
_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # `tokenizers` is missing: simply do not advertise the fast tokenizer.
    pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime they stay lazy.
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    # Replace this module with a lazy proxy so the heavy tokenizer modules are
    # only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
12
"""Tests for `datasets.download.download_manager.DownloadManager`.

Network access is stubbed out with a canned `requests.Response` substitute so
the download path can be exercised hermetically.
"""
import json
import os
from pathlib import Path

import pytest

from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename

URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"


class MockResponse:
    """Bare-bones stand-in for `requests.Response` serving a fixed payload."""

    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        # Stream the canned payload as a single chunk, as requests would.
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    """Network-free replacement for `requests.request`."""
    return MockResponse()


@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager(monkeypatch, urls_type, tmp_path):
    """Downloads land under `<cache>/downloads/<hash>` with a JSON sidecar."""
    import requests

    monkeypatch.setattr(requests, "request", mock_request)

    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    # Normalise the str/list/dict parametrisations to parallel sequences so a
    # single set of assertions covers all three shapes.
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            downloaded_path_content = downloaded_path.read_text()
            assert downloaded_path_content == CONTENT
            # Each cached file carries a `<hash>.json` metadata sidecar.
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}


@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_download_manager_extract(paths_type, xz_file, text_file):
    """Extraction lands under `<cache>/extracted/<hash>` and round-trips content."""
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    # Same normalisation trick as in test_download_manager above.
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content


def _test_jsonl(path, file):
    """Shared checks for one jsonl member yielded by `iter_archive`."""
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4


@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2


@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    # Outer loop walks the enclosing archive; inner loop walks the nested one.
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2


def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    # Hidden files must be skipped, leaving exactly the two visible text files.
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
340
0
"""simple docstring""" import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin UpperCAmelCase: Dict = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class UpperCamelCase ( snake_case , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : str = XLMRobertaTokenizer SCREAMING_SNAKE_CASE_ : int = XLMRobertaTokenizerFast SCREAMING_SNAKE_CASE_ : Dict = True SCREAMING_SNAKE_CASE_ : Dict = True def lowerCamelCase__ ( self ): super().setUp() # We have a SentencePiece fixture for testing _lowercase : str = XLMRobertaTokenizer(UpperCAmelCase_ ,keep_accents=UpperCAmelCase_ ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase__ ( self ): _lowercase : str = """<pad>""" _lowercase : Union[str, Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) ,UpperCAmelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) ,UpperCAmelCase_ ) def lowerCamelCase__ ( self ): _lowercase : List[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,"""<s>""" ) self.assertEqual(vocab_keys[1] ,"""<pad>""" ) self.assertEqual(vocab_keys[-1] ,"""<mask>""" ) self.assertEqual(len(UpperCAmelCase_ ) ,10_02 ) def lowerCamelCase__ ( self ): self.assertEqual(self.get_tokenizer().vocab_size ,10_02 ) def lowerCamelCase__ ( self ): _lowercase : Any = XLMRobertaTokenizer(UpperCAmelCase_ ,keep_accents=UpperCAmelCase_ ) _lowercase : Optional[int] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(UpperCAmelCase_ ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( 
tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) ,[value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] ,) _lowercase : Tuple = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( UpperCAmelCase_ ,[ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] ,) _lowercase : Optional[Any] = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) self.assertListEqual( UpperCAmelCase_ ,[ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] ,) _lowercase : Any = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ ) self.assertListEqual( UpperCAmelCase_ ,[ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] ,) def lowerCamelCase__ ( self ): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return _lowercase : int = (self.rust_tokenizer_class, """hf-internal-testing/tiny-xlm-roberta""", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): _lowercase : Tuple = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ ) _lowercase : Optional[int] = 
self.tokenizer_class.from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ ) _lowercase : Any = tempfile.mkdtemp() _lowercase : Any = tokenizer_r.save_pretrained(UpperCAmelCase_ ) _lowercase : Optional[Any] = tokenizer_p.save_pretrained(UpperCAmelCase_ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) _lowercase : Tuple = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f ) self.assertSequenceEqual(UpperCAmelCase_ ,UpperCAmelCase_ ) # Checks everything loads correctly in the same way _lowercase : Union[str, Any] = tokenizer_r.from_pretrained(UpperCAmelCase_ ) _lowercase : Tuple = tokenizer_p.from_pretrained(UpperCAmelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCAmelCase_ ,UpperCAmelCase_ ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(UpperCAmelCase_ ) # Save tokenizer rust, legacy_format=True _lowercase : List[str] = tempfile.mkdtemp() _lowercase : List[Any] = tokenizer_r.save_pretrained(UpperCAmelCase_ ,legacy_format=UpperCAmelCase_ ) _lowercase : List[str] = tokenizer_p.save_pretrained(UpperCAmelCase_ ) # Checks it save with the same files self.assertSequenceEqual(UpperCAmelCase_ ,UpperCAmelCase_ ) # Checks everything loads correctly in the same way _lowercase : Dict = tokenizer_r.from_pretrained(UpperCAmelCase_ ) _lowercase : Tuple = tokenizer_p.from_pretrained(UpperCAmelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCAmelCase_ ,UpperCAmelCase_ ) ) shutil.rmtree(UpperCAmelCase_ ) # Save tokenizer rust, legacy_format=False _lowercase : Any = tempfile.mkdtemp() _lowercase : str = 
tokenizer_r.save_pretrained(UpperCAmelCase_ ,legacy_format=UpperCAmelCase_ ) _lowercase : Any = tokenizer_p.save_pretrained(UpperCAmelCase_ ) # Checks it saved the tokenizer.json file self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way _lowercase : Tuple = tokenizer_r.from_pretrained(UpperCAmelCase_ ) _lowercase : List[Any] = tokenizer_p.from_pretrained(UpperCAmelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCAmelCase_ ,UpperCAmelCase_ ) ) shutil.rmtree(UpperCAmelCase_ ) @cached_property def lowerCamelCase__ ( self ): return XLMRobertaTokenizer.from_pretrained("""xlm-roberta-base""" ) def lowerCamelCase__ ( self ): with tempfile.NamedTemporaryFile() as f: shutil.copyfile(UpperCAmelCase_ ,f.name ) _lowercase : Any = XLMRobertaTokenizer(f.name ,keep_accents=UpperCAmelCase_ ) _lowercase : int = pickle.dumps(UpperCAmelCase_ ) pickle.loads(UpperCAmelCase_ ) def lowerCamelCase__ ( self ): if not self.test_rust_tokenizer: return _lowercase : str = self.get_tokenizer() _lowercase : Union[str, Any] = self.get_rust_tokenizer() _lowercase : Optional[Any] = """I was born in 92000, and this is falsé.""" _lowercase : int = tokenizer.tokenize(UpperCAmelCase_ ) _lowercase : int = rust_tokenizer.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ ,UpperCAmelCase_ ) _lowercase : str = tokenizer.encode(UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ) _lowercase : List[str] = rust_tokenizer.encode(UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ ,UpperCAmelCase_ ) _lowercase : str = self.get_rust_tokenizer() _lowercase : Optional[Any] = tokenizer.encode(UpperCAmelCase_ ) _lowercase : Optional[Any] = rust_tokenizer.encode(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ ,UpperCAmelCase_ ) @slow def lowerCamelCase__ ( self ): _lowercase : Tuple = 
"""Hello World!""" _lowercase : Optional[Any] = [0, 3_53_78, 66_61, 38, 2] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(UpperCAmelCase_ ,self.big_tokenizer.encode(UpperCAmelCase_ ) ) @slow def lowerCamelCase__ ( self ): _lowercase : List[str] = ( """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will""" """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth""" ) _lowercase : List[Any] = [ 0, 32_93, 83, 10, 45_52, 49_89, 79_86, 6_78, 10, 59_15, 1_11, 17_94_59, 12_48_50, 4, 60_44, 2_37, 12, 6, 5, 6, 4, 67_80, 7_05, 15, 13_88, 44, 3_78, 1_01_14, 7_11, 1_52, 20, 6, 5, 2_23_76, 6_42, 12_21, 1_51_90, 3_41_53, 4_50, 56_08, 9_59, 11_19, 5_77_02, 1_36, 1_86, 47, 10_98, 2_93_67, 47, # 4426, # What fairseq tokenizes from "<unk>": "_<" # 3678, # What fairseq tokenizes from "<unk>": "unk" # 2740, # What fairseq tokenizes from "<unk>": ">" 3, # What we tokenize from "<unk>": "<unk>" 6, # Residue from the tokenization: an extra sentencepiece underline 4, 60_44, 2_37, 62_84, 5_09_01, 5_28, 31, 90, 34, 9_27, 2, ] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(UpperCAmelCase_ ,self.big_tokenizer.encode(UpperCAmelCase_ ) ) @slow def lowerCamelCase__ ( self ): # fmt: off _lowercase : List[str] = {"""input_ids""": [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 
6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase_ ,model_name="""xlm-roberta-base""" ,revision="""d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3""" ,)
721
"""simple docstring""" import math def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(__UpperCAmelCase ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase = 10001 ): try: _lowercase : Union[str, Any] = int(__UpperCAmelCase ) except (TypeError, ValueError): raise TypeError("""Parameter nth must be int or castable to int.""" ) from None if nth <= 0: raise ValueError("""Parameter nth must be greater than or equal to one.""" ) _lowercase : list[int] = [] _lowercase : Union[str, Any] = 2 while len(__UpperCAmelCase ) < nth: if is_prime(__UpperCAmelCase ): primes.append(__UpperCAmelCase ) num += 1 else: num += 1 return primes[len(__UpperCAmelCase ) - 1] if __name__ == "__main__": print(F'{solution() = }')
600
0
"""Segment tree supporting point updates and range queries for any
associative combining function (e.g. ``operator.add``, ``max``, ``min``)."""
from collections.abc import Sequence
from queue import Queue


class SegmentTreeNode:
    """One tree node covering the inclusive index range [start, end]."""

    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        # Midpoint used to route updates/queries to the child subtrees.
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    """Segment tree over *collection*, combining values with *function*.

    Build is O(n); ``update`` and ``query_range`` are O(log n).
    """

    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        # Keep root well-defined even for an empty collection so traverse()
        # does not raise AttributeError.
        self.root = None
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        """Set collection index *i* to *val* and refresh affected aggregates."""
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        """Return fn-aggregate of the values on the inclusive range [i, j]."""
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        # Leaf: a single element of the collection.
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        # Recompute this aggregate from the (now updated) children.
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # Range lies entirely in the left child.
                return self._query_range(node.left, i, j)
            # Range straddles both children: split at the midpoint.
            return self.fn(
                self._query_range(node.left, i, node.mid),
                self._query_range(node.right, node.mid + 1, j),
            )
        # Range lies entirely in the right child.
        return self._query_range(node.right, i, j)

    def traverse(self):
        """Yield all nodes in breadth-first order."""
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)


if __name__ == "__main__":
    import operator

    for fn in [operator.add, max, min]:
        print("*" * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
        for node in arr.traverse():
            print(node)
        print()

        arr.update(1, 5)
        for node in arr.traverse():
            print(node)
        print()

        print(arr.query_range(3, 4))  # 7
        print(arr.query_range(2, 2))  # 5
        print(arr.query_range(1, 3))  # 13
        print()
72
import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowercase : Any = logging.get_logger(__name__) lowercase : Union[str, Any] = {'vocab_file': 'spiece.model'} lowercase : Tuple = { 'vocab_file': { 'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model', } } lowercase : List[str] = { 'AI-Sweden/gpt-sw3-126m': 2048, 'AI-Sweden/gpt-sw3-350m': 2048, 'AI-Sweden/gpt-sw3-1.6b': 2048, 'AI-Sweden/gpt-sw3-6.7b': 2048, 'AI-Sweden/gpt-sw3-20b': 2048, } class lowerCamelCase__ ( __lowercase): '''simple docstring''' _A = VOCAB_FILES_NAMES _A = PRETRAINED_VOCAB_FILES_MAP _A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _A = ['input_ids', 'attention_mask'] def __init__( self :int , a :List[Any] , a :int=False , a :int=False , a :List[str]=False , a :Optional[Any]=None , a :Tuple=None , a :Optional[Any]=None , a :Any=None , a :Optional[Dict[str, Any]] = None , **a :Dict , ) -> None: __UpperCamelCase : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs __UpperCamelCase : Any = kwargs.get("name_or_path" ) if name_or_path is None: logger.warning( "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b," " you are testing the model, this can safely be ignored" ) __UpperCamelCase : Any = 
"None" # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing __UpperCamelCase : Union[str, Any] = "<|endoftext|>" if eos_token is None else eos_token __UpperCamelCase : int = "<unk>" if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: __UpperCamelCase : str = unk_token if pad_token is None else pad_token __UpperCamelCase : Dict = eos_token if bos_token is None else bos_token else: __UpperCamelCase : Optional[int] = "<pad>" if pad_token is None else pad_token __UpperCamelCase : Any = "<s>" if bos_token is None else bos_token super().__init__( do_lower_case=a , remove_space=a , keep_accents=a , bos_token=a , eos_token=a , unk_token=a , pad_token=a , sp_model_kwargs=self.sp_model_kwargs , **a , ) __UpperCamelCase : List[str] = do_lower_case __UpperCamelCase : List[str] = remove_space __UpperCamelCase : int = keep_accents __UpperCamelCase : List[Any] = vocab_file __UpperCamelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(a ) # Used for whitespace normalization in input texts # fmt : off __UpperCamelCase : Dict = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", "„"} # fmt : on # Regular expression to remove non-printing characters (e.g. 
some unicode control chars) in preprocessing __UpperCamelCase : int = re.compile( f'[{"".join(map(a , list(range(0 , 9 ) ) + list(range(1_1 , 3_2 ) ) + list(range(1_2_7 , 1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]' ) def __getstate__( self :Tuple ) -> Union[str, Any]: __UpperCamelCase : int = self.__dict__.copy() __UpperCamelCase : Optional[Any] = None return state def __setstate__( self :Union[str, Any] , a :Optional[int] ) -> Tuple: __UpperCamelCase : int = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): __UpperCamelCase : Optional[int] = {} __UpperCamelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def _lowerCamelCase ( self :int ) -> int: return len(self.sp_model ) def _lowerCamelCase ( self :Any , a :str ) -> str: __UpperCamelCase : int = self.non_printing_characters_re.sub("" , a ) # Normalize whitespaces __UpperCamelCase : Any = "".join([char if char not in self.whitespaces else " " for char in text] ) # NFC Unicode normalization __UpperCamelCase : str = unicodedata.normalize("NFC" , a ) return text def _lowerCamelCase ( self :int , a :str , **a :str ) -> List[str]: __UpperCamelCase : Any = self.preprocess_text(a ) return self.sp_model.encode(a , out_type=a ) def _lowerCamelCase ( self :int , a :str ) -> int: return self.sp_model.PieceToId(a ) def _lowerCamelCase ( self :Any , a :int ) -> str: return self.sp_model.IdToPiece(a ) @staticmethod def _lowerCamelCase ( a :str ) -> str: return out_string def _lowerCamelCase ( self :Optional[int] , a :List[str] ) -> str: __UpperCamelCase : List[Any] = [] __UpperCamelCase : Tuple = "" __UpperCamelCase : Optional[Any] = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding 
extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(a ) + token __UpperCamelCase : int = True __UpperCamelCase : Any = [] else: current_sub_tokens.append(a ) __UpperCamelCase : Dict = False out_string += self.sp_model.decode(a ) return out_string def _lowerCamelCase ( self :Union[str, Any] ) -> Dict[str, int]: __UpperCamelCase : Tuple = {self.convert_ids_to_tokens(a ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _lowerCamelCase ( self :Any , a :str , a :Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(a ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __UpperCamelCase : int = os.path.join( a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(a ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , a ) elif not os.path.isfile(self.vocab_file ): with open(a , "wb" ) as fi: __UpperCamelCase : Dict = self.sp_model.serialized_model_proto() fi.write(a ) return (out_vocab_file,) def _lowerCamelCase ( self :List[str] , a :Union[str, List[str]] , a :Union[str, bool] = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]: if isinstance(a , a ): __UpperCamelCase : Optional[Any] = self.preprocess_text(a ) __UpperCamelCase : List[str] = self.sp_model.encode(a ) else: __UpperCamelCase : List[str] = [self.preprocess_text(a ) for t in text] __UpperCamelCase : str = self.sp_model.encode(a ) if return_tensors is True or return_tensors == "pt": __UpperCamelCase : Any = torch.tensor(a ) return token_ids def _lowerCamelCase ( self :Any , a :Union[int, List[int]] ) -> str: return self.sp_model.decode(a ) def _lowerCamelCase ( self :Dict , a :"Conversation" ) -> List[int]: __UpperCamelCase : Dict = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()] __UpperCamelCase : int = 
( f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(a ) + f'{self.bos_token}Bot:' ) return self.encode(text=a )
557
0
"""Print every subsequence of a sequence via backtracking."""
from typing import Any


def generate_all_subsequences(sequence: list[Any]) -> None:
    """Print all 2**len(sequence) subsequences of *sequence*, one per line."""
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(
    sequence: list[Any], current_subsequence: list[Any], index: int
) -> None:
    """Depth-first walk of the include/exclude decision tree.

    At each *index* the element is first excluded, then included, so every
    subset (the empty one included) is printed exactly once.
    """
    if index == len(sequence):
        print(current_subsequence)
        return

    # Branch 1: skip sequence[index].
    create_state_space_tree(sequence, current_subsequence, index + 1)
    # Branch 2: take sequence[index]; undo the choice while unwinding.
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
219
import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class a : def __init__( self :int ,__lowercase :Union[str, Any] ,__lowercase :Optional[int]=1_3 ,__lowercase :Dict=7 ,__lowercase :Union[str, Any]=True ,__lowercase :List[Any]=True ,__lowercase :Tuple=False ,__lowercase :Optional[int]=True ,__lowercase :Optional[int]=9_9 ,__lowercase :Optional[Any]=3_2 ,__lowercase :Union[str, Any]=5 ,__lowercase :Dict=4 ,__lowercase :Optional[Any]=3_7 ,__lowercase :Optional[int]="gelu" ,__lowercase :Optional[int]=0.1 ,__lowercase :Dict=0.1 ,__lowercase :str=5_1_2 ,__lowercase :str=1_6 ,__lowercase :Optional[Any]=2 ,__lowercase :Union[str, Any]=0.02 ,__lowercase :Optional[int]=3 ,__lowercase :Optional[Any]=4 ,__lowercase :Any=None ,): snake_case__ : Union[str, Any] = parent snake_case__ : Any = batch_size snake_case__ : Dict = seq_length snake_case__ : Tuple = is_training snake_case__ : List[str] = use_input_mask snake_case__ : int = use_token_type_ids snake_case__ : List[str] = use_labels snake_case__ : List[str] = vocab_size snake_case__ : str = hidden_size snake_case__ : List[Any] = num_hidden_layers snake_case__ : Any = num_attention_heads snake_case__ : Optional[int] = intermediate_size snake_case__ : Union[str, Any] = hidden_act snake_case__ : Optional[int] = hidden_dropout_prob snake_case__ : Tuple = attention_probs_dropout_prob snake_case__ : Optional[Any] = max_position_embeddings snake_case__ : Optional[Any] = type_vocab_size snake_case__ : 
List[Any] = type_sequence_label_size snake_case__ : List[str] = initializer_range snake_case__ : List[Any] = num_labels snake_case__ : List[Any] = num_choices snake_case__ : Optional[int] = scope def __lowerCamelCase ( self :Tuple ): snake_case__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) snake_case__ : Tuple = None if self.use_input_mask: snake_case__ : str = random_attention_mask([self.batch_size, self.seq_length] ) snake_case__ : Tuple = None if self.use_token_type_ids: snake_case__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) snake_case__ : List[Any] = None snake_case__ : Union[str, Any] = None snake_case__ : int = None if self.use_labels: snake_case__ : List[str] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) snake_case__ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) snake_case__ : Dict = ids_tensor([self.batch_size] ,self.num_choices ) snake_case__ : Union[str, Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCamelCase ( self :Optional[Any] ): return LlamaConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=__lowercase ,initializer_range=self.initializer_range ,) def __lowerCamelCase ( self :int ,__lowercase :List[str] ,__lowercase :int ,__lowercase :Dict ,__lowercase :Union[str, Any] ,__lowercase :Optional[int] ,__lowercase :Any ,__lowercase :Optional[int] ): snake_case__ : int = LlamaModel(config=__lowercase ) model.to(__lowercase ) model.eval() snake_case__ : Any = 
model(__lowercase ,attention_mask=__lowercase ) snake_case__ : Union[str, Any] = model(__lowercase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCamelCase ( self :int ,__lowercase :str ,__lowercase :Optional[Any] ,__lowercase :Tuple ,__lowercase :int ,__lowercase :Tuple ,__lowercase :Tuple ,__lowercase :Any ,__lowercase :Dict ,__lowercase :List[Any] ,): snake_case__ : List[str] = True snake_case__ : Union[str, Any] = LlamaModel(__lowercase ) model.to(__lowercase ) model.eval() snake_case__ : Optional[int] = model( __lowercase ,attention_mask=__lowercase ,encoder_hidden_states=__lowercase ,encoder_attention_mask=__lowercase ,) snake_case__ : str = model( __lowercase ,attention_mask=__lowercase ,encoder_hidden_states=__lowercase ,) snake_case__ : Union[str, Any] = model(__lowercase ,attention_mask=__lowercase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCamelCase ( self :Optional[int] ,__lowercase :int ,__lowercase :int ,__lowercase :Tuple ,__lowercase :Optional[Any] ,__lowercase :Optional[Any] ,__lowercase :Dict ,__lowercase :Union[str, Any] ,__lowercase :Dict ,__lowercase :List[Any] ,): snake_case__ : Optional[int] = LlamaForCausalLM(config=__lowercase ) model.to(__lowercase ) model.eval() snake_case__ : int = model(__lowercase ,attention_mask=__lowercase ,labels=__lowercase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCamelCase ( self :Any ,__lowercase :Tuple ,__lowercase :str ,__lowercase :int ,__lowercase :Any ,__lowercase :Any ,__lowercase :Dict ,__lowercase :Tuple ,__lowercase :Tuple ,__lowercase :List[str] ,): snake_case__ : int = True snake_case__ : Union[str, Any] = True snake_case__ : List[str] = LlamaForCausalLM(config=__lowercase ) model.to(__lowercase ) model.eval() # first forward pass snake_case__ : Tuple = model( __lowercase 
,attention_mask=__lowercase ,encoder_hidden_states=__lowercase ,encoder_attention_mask=__lowercase ,use_cache=__lowercase ,) snake_case__ : str = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids snake_case__ : int = ids_tensor((self.batch_size, 3) ,config.vocab_size ) snake_case__ : Union[str, Any] = ids_tensor((self.batch_size, 3) ,vocab_size=2 ) # append to next input_ids and snake_case__ : Tuple = torch.cat([input_ids, next_tokens] ,dim=-1 ) snake_case__ : Dict = torch.cat([input_mask, next_mask] ,dim=-1 ) snake_case__ : Dict = model( __lowercase ,attention_mask=__lowercase ,encoder_hidden_states=__lowercase ,encoder_attention_mask=__lowercase ,output_hidden_states=__lowercase ,)['''hidden_states'''][0] snake_case__ : Any = model( __lowercase ,attention_mask=__lowercase ,encoder_hidden_states=__lowercase ,encoder_attention_mask=__lowercase ,past_key_values=__lowercase ,output_hidden_states=__lowercase ,)['''hidden_states'''][0] # select random slice snake_case__ : Tuple = ids_tensor((1,) ,output_from_past.shape[-1] ).item() snake_case__ : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach() snake_case__ : Optional[Any] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__lowercase ,__lowercase ,atol=1e-3 ) ) def __lowerCamelCase ( self :Dict ): snake_case__ : Any = self.prepare_config_and_inputs() ( ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ) : int = config_and_inputs snake_case__ : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class a ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ): __lowerCAmelCase : List[Any] = (LlamaModel, LlamaForCausalLM, 
LlamaForSequenceClassification) if is_torch_available() else () __lowerCAmelCase : Optional[int] = (LlamaForCausalLM,) if is_torch_available() else () __lowerCAmelCase : List[str] = ( { """feature-extraction""": LlamaModel, """text-classification""": LlamaForSequenceClassification, """text-generation""": LlamaForCausalLM, """zero-shot""": LlamaForSequenceClassification, } if is_torch_available() else {} ) __lowerCAmelCase : str = False __lowerCAmelCase : Any = False def __lowerCamelCase ( self :List[Any] ): snake_case__ : Any = LlamaModelTester(self ) snake_case__ : Dict = ConfigTester(self ,config_class=__lowercase ,hidden_size=3_7 ) def __lowerCamelCase ( self :Dict ): self.config_tester.run_common_tests() def __lowerCamelCase ( self :Tuple ): snake_case__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowercase ) def __lowerCamelCase ( self :Optional[Any] ): snake_case__ : str = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: snake_case__ : Union[str, Any] = type self.model_tester.create_and_check_model(*__lowercase ) def __lowerCamelCase ( self :List[str] ): snake_case__ , snake_case__ : int = self.model_tester.prepare_config_and_inputs_for_common() snake_case__ : Dict = 3 snake_case__ : Union[str, Any] = input_dict['''input_ids'''] snake_case__ : Tuple = input_ids.ne(1 ).to(__lowercase ) snake_case__ : List[Any] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size ) snake_case__ : Union[str, Any] = LlamaForSequenceClassification(__lowercase ) model.to(__lowercase ) model.eval() snake_case__ : List[str] = model(__lowercase ,attention_mask=__lowercase ,labels=__lowercase ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) def __lowerCamelCase ( self :str ): snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common() snake_case__ : 
Union[str, Any] = 3 snake_case__ : List[Any] = '''single_label_classification''' snake_case__ : Tuple = input_dict['''input_ids'''] snake_case__ : Optional[int] = input_ids.ne(1 ).to(__lowercase ) snake_case__ : Any = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size ) snake_case__ : Dict = LlamaForSequenceClassification(__lowercase ) model.to(__lowercase ) model.eval() snake_case__ : Dict = model(__lowercase ,attention_mask=__lowercase ,labels=__lowercase ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) def __lowerCamelCase ( self :Optional[int] ): snake_case__ , snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case__ : Any = 3 snake_case__ : Optional[int] = '''multi_label_classification''' snake_case__ : str = input_dict['''input_ids'''] snake_case__ : Tuple = input_ids.ne(1 ).to(__lowercase ) snake_case__ : List[str] = ids_tensor( [self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float ) snake_case__ : Optional[int] = LlamaForSequenceClassification(__lowercase ) model.to(__lowercase ) model.eval() snake_case__ : List[Any] = model(__lowercase ,attention_mask=__lowercase ,labels=__lowercase ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''' ) def __lowerCamelCase ( self :Dict ): pass @parameterized.expand([('''linear''',), ('''dynamic''',)] ) def __lowerCamelCase ( self :Optional[int] ,__lowercase :Tuple ): snake_case__ , snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() snake_case__ : Any = ids_tensor([1, 1_0] ,config.vocab_size ) snake_case__ : Optional[Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size ) set_seed(4_2 ) # Fixed seed at init time so the two models get the 
same random weights snake_case__ : Any = LlamaModel(__lowercase ) original_model.to(__lowercase ) original_model.eval() snake_case__ : Any = original_model(__lowercase ).last_hidden_state snake_case__ : Any = original_model(__lowercase ).last_hidden_state set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights snake_case__ : List[str] = {'''type''': scaling_type, '''factor''': 10.0} snake_case__ : str = LlamaModel(__lowercase ) scaled_model.to(__lowercase ) scaled_model.eval() snake_case__ : List[str] = scaled_model(__lowercase ).last_hidden_state snake_case__ : Dict = scaled_model(__lowercase ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(__lowercase ,__lowercase ,atol=1e-5 ) ) else: self.assertFalse(torch.allclose(__lowercase ,__lowercase ,atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(__lowercase ,__lowercase ,atol=1e-5 ) ) @require_torch class a ( unittest.TestCase ): @unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' ) @slow def __lowerCamelCase ( self :Union[str, Any] ): snake_case__ : Optional[int] = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8] snake_case__ : List[str] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''' ,device_map='''auto''' ) snake_case__ : int = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 snake_case__ : Optional[Any] = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] ) torch.testing.assert_close(out.mean(-1 ) ,__lowercase ,atol=1e-2 ,rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off snake_case__ : List[Any] = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, 
-7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] ) # fmt: on torch.testing.assert_close(out[0, 0, :3_0] ,__lowercase ,atol=1e-5 ,rtol=1e-5 ) @unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' ) @slow def __lowerCamelCase ( self :List[str] ): snake_case__ : int = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8] snake_case__ : Any = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''' ,device_map='''auto''' ) snake_case__ : str = model(torch.tensor(__lowercase ) ) # Expected mean on dim = -1 snake_case__ : str = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] ) torch.testing.assert_close(out.mean(-1 ) ,__lowercase ,atol=1e-2 ,rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off snake_case__ : str = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] ) # fmt: on torch.testing.assert_close(out[0, 0, :3_0] ,__lowercase ,atol=1e-5 ,rtol=1e-5 ) @unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' ) @slow def __lowerCamelCase ( self :List[Any] ): snake_case__ : str = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8] snake_case__ : Optional[Any] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' ,device_map='''auto''' ) snake_case__ : str = model(torch.tensor(__lowercase ) ) # Expected mean on dim = -1 snake_case__ : Optional[Any] = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] ) torch.testing.assert_close(out.mean(-1 ) ,__lowercase ,atol=1e-2 ,rtol=1e-2 ) # slicing 
logits[0, 0, 0:30] # fmt: off snake_case__ : Optional[Any] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) ,__lowercase ,atol=1e-2 ,rtol=1e-2 ) @unittest.skip( '''Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test''' ) @slow def __lowerCamelCase ( self :Optional[Any] ): snake_case__ : Tuple = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8] snake_case__ : Optional[int] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''' ,device_map='''auto''' ) snake_case__ : Any = model(torch.tensor(__lowercase ) ) snake_case__ : Optional[int] = torch.tensor( [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] ,dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) ,__lowercase ,atol=1e-2 ,rtol=1e-2 ) # fmt: off snake_case__ : Tuple = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] ) # fmt: on torch.testing.assert_close(out[0, 0, :3_0] ,__lowercase ,atol=1e-5 ,rtol=1e-5 ) @unittest.skip('''Model is curently gated''' ) @slow def __lowerCamelCase ( self :Dict ): snake_case__ : Tuple = '''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi''' snake_case__ : Optional[Any] = 
'''Simply put, the theory of relativity states that ''' snake_case__ : str = LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' ) snake_case__ : List[Any] = tokenizer.encode(__lowercase ,return_tensors='''pt''' ) snake_case__ : List[str] = LlamaForCausalLM.from_pretrained( '''meta-llama/Llama-2-13b-chat-hf''' ,device_map='''sequential''' ,use_safetensors=__lowercase ) # greedy generation outputs snake_case__ : int = model.generate(__lowercase ,max_new_tokens=6_4 ,top_p=__lowercase ,temperature=1 ,do_sample=__lowercase ) snake_case__ : Union[str, Any] = tokenizer.decode(generated_ids[0] ,skip_special_tokens=__lowercase ) self.assertEqual(__lowercase ,__lowercase )
219
1
def lowerCAmelCase_ ( _snake_case : int = 1000 ) -> int: '''simple docstring''' __magic_name__ , __magic_name__ : Optional[Any] = 1, 1 __magic_name__ : str = 2 while True: __magic_name__ : Union[str, Any] = 0 __magic_name__ : Dict = fa + fa __magic_name__ , __magic_name__ : str = fa, f index += 1 for _ in str(_snake_case ): i += 1 if i == n: break return index if __name__ == "__main__": print(solution(int(str(input()).strip())))
124
import os import numpy import onnx def lowerCAmelCase_ ( _snake_case : Tuple , _snake_case : int ) -> List[str]: '''simple docstring''' __magic_name__ : Dict = a.name __magic_name__ : Optional[Any] = b.name __magic_name__ : Optional[int] = "" __magic_name__ : int = "" __magic_name__ : Any = a == b __magic_name__ : int = name_a __magic_name__ : List[str] = name_b return res def lowerCAmelCase_ ( _snake_case : Any , _snake_case : Dict , _snake_case : str ) -> str: '''simple docstring''' for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(_snake_case , _snake_case ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , _snake_case , _snake_case ) _graph_replace_input_with(node_proto.attribute[1].g , _snake_case , _snake_case ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , _snake_case , _snake_case ) def lowerCAmelCase_ ( _snake_case : Any , _snake_case : Optional[Any] , _snake_case : str ) -> Any: '''simple docstring''' for n in graph_proto.node: _node_replace_input_with(_snake_case , _snake_case , _snake_case ) def lowerCAmelCase_ ( _snake_case : List[str] , _snake_case : List[str] , _snake_case : Union[str, Any] ) -> List[Any]: '''simple docstring''' __magic_name__ : Tuple = list(model.graph.initializer ) __magic_name__ : Any = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i __magic_name__ : Dict = inits[i].name __magic_name__ : List[Any] = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , _snake_case , _snake_case ) def lowerCAmelCase_ ( _snake_case : str ) -> List[str]: '''simple docstring''' __magic_name__ : Union[str, Any] = os.path.dirname(_snake_case ) __magic_name__ : 
List[str] = os.path.basename(_snake_case ) __magic_name__ : Tuple = onnx.load(os.path.join(_snake_case , _snake_case ) ) __magic_name__ : Dict = list(model.graph.initializer ) __magic_name__ : Dict = set() __magic_name__ : Any = {} __magic_name__ : Tuple = [] __magic_name__ : Optional[int] = 0 for i in range(len(_snake_case ) ): if i in dup_set: continue for j in range(i + 1 , len(_snake_case ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(_snake_case ) dup_set.add(_snake_case ) __magic_name__ : Optional[int] = inits[j].data_type __magic_name__ : Any = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print("unexpected data type: " , _snake_case ) total_reduced_size += mem_size __magic_name__ : Optional[int] = inits[i].name __magic_name__ : Optional[Any] = inits[j].name if name_i in dup_map: dup_map[name_i].append(_snake_case ) else: __magic_name__ : Union[str, Any] = [name_j] ind_to_replace.append((j, i) ) print("total reduced size: " , total_reduced_size / 1024 / 1024 / 1024 , "GB" ) __magic_name__ : List[Any] = sorted(_snake_case ) _remove_dup_initializers_from_model(_snake_case , _snake_case , _snake_case ) __magic_name__ : List[str] = "optimized_" + model_file_name __magic_name__ : Tuple = os.path.join(_snake_case , _snake_case ) onnx.save(_snake_case , _snake_case ) return new_model
124
1
"""simple docstring""" import argparse import dataclasses import json import logging import os import shutil from typing import List, Optional import datasets from accelerate import Accelerator from datasets import load_dataset from finetuning import finetune from tqdm.auto import tqdm import transformers from transformers import AutoConfig, set_seed from transformers.trainer_utils import IntervalStrategy __UpperCAmelCase = logging.getLogger(__name__) __UpperCAmelCase = '''pytorch_model.bin''' @dataclasses.dataclass class __UpperCAmelCase : __lowerCamelCase : str = dataclasses.field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} ) __lowerCamelCase : Optional[str] = dataclasses.field( default=_UpperCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."} , ) @dataclasses.dataclass class __UpperCAmelCase : __lowerCamelCase : str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."} ) __lowerCamelCase : str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."} ) __lowerCamelCase : Optional[str] = dataclasses.field( default=_UpperCamelCase , metadata={"help": "A csv or a json file containing the validation data."} ) __lowerCamelCase : Optional[str] = dataclasses.field( default=_UpperCamelCase , metadata={"help": "The name of the task to train on."} , ) __lowerCamelCase : Optional[List[str]] = dataclasses.field( default=_UpperCamelCase , metadata={"help": "The list of labels for the task."} ) @dataclasses.dataclass class __UpperCAmelCase : __lowerCamelCase : str = dataclasses.field( metadata={"help": "The output directory where the model predictions and checkpoints will be written."} ) __lowerCamelCase : Optional[str] = dataclasses.field( default="accuracy" , metadata={"help": "The evaluation metric used for the task."} ) __lowerCamelCase : Optional[str] = dataclasses.field( default="no" 
, metadata={ "help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]" } , ) __lowerCamelCase : Optional[int] = dataclasses.field( default=10 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , ) __lowerCamelCase : Optional[float] = dataclasses.field( default=0.0 , metadata={ "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions." } , ) __lowerCamelCase : Optional[bool] = dataclasses.field( default=_UpperCamelCase , metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."} , ) __lowerCamelCase : Optional[bool] = dataclasses.field( default=_UpperCamelCase , metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."} , ) __lowerCamelCase : Optional[bool] = dataclasses.field( default=_UpperCamelCase , metadata={"help": "Whether to fine-tune on labeled data after pseudo training."} , ) __lowerCamelCase : Optional[float] = dataclasses.field( default=0.0 , metadata={"help": "Confidence threshold for pseudo-labeled data filtering."} , ) __lowerCamelCase : Optional[int] = dataclasses.field( default=1_00 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , ) __lowerCamelCase : Optional[int] = dataclasses.field( default=_UpperCamelCase , metadata={"help": "Random seed for initialization."} , ) def lowercase__ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[Any] ) -> Tuple: '''simple docstring''' a__ : Optional[int] = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 ) if args.do_filter_by_confidence: a__ : str = dataset.filter(lambda lowerCAmelCase__ : example["probability"] > args.confidence_threshold ) if args.do_filter_by_val_performance: assert 
eval_result >= 0.0 and eval_result <= 1.0 a__ : Dict = int(eval_result * len(lowerCAmelCase__ ) ) print(lowerCAmelCase__ ) a__ : Any = dataset.sort("probability" , reverse=lowerCAmelCase__ ) a__ : Tuple = dataset.select(range(lowerCAmelCase__ ) ) a__ : Any = dataset.remove_columns(["label", "probability"] ) a__ : List[str] = dataset.rename_column("prediction" , "label" ) a__ : List[Any] = dataset.map(lambda lowerCAmelCase__ : {"label": idalabel[example["label"]]} ) a__ : str = dataset.shuffle(seed=args.seed ) a__ : str = os.path.join(lowerCAmelCase__ , F"train_pseudo.{args.data_file_extension}" ) if args.data_file_extension == "csv": dataset.to_csv(lowerCAmelCase__ , index=lowerCAmelCase__ ) else: dataset.to_json(lowerCAmelCase__ ) def lowercase__ ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Union[str, Any] , **lowerCAmelCase__ : str ) -> List[str]: '''simple docstring''' a__ : Union[str, Any] = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , ) logger.info(accelerator.state ) # Setup logging, we only want one process per machine to log things on the # screen. accelerator.is_local_main_process is only True for one process per # machine. 
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() a__ : List[Any] = STModelArguments(model_name_or_path=lowerCAmelCase__ ) a__ : List[Any] = STDataArguments(train_file=lowerCAmelCase__ , infer_file=lowerCAmelCase__ ) a__ : List[str] = STTrainingArguments(output_dir=lowerCAmelCase__ ) a__ : str = argparse.Namespace() for arg_class in (model_args, data_args, training_args): for key, value in vars(lowerCAmelCase__ ).items(): setattr(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) for key, value in kwargs.items(): if hasattr(lowerCAmelCase__ , lowerCAmelCase__ ): setattr(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # Sanity checks a__ : int = {} a__ : int = None # You need to provide the training data and the data to predict on assert args.train_file is not None assert args.infer_file is not None a__ : int = args.train_file a__ : Union[str, Any] = args.infer_file if args.evaluation_strategy != IntervalStrategy.NO.value: assert args.eval_file is not None a__ : Dict = args.eval_file for key in data_files: a__ : str = data_files[key].split("." )[-1] assert extension in ["csv", "json"], F"`{key}_file` should be a csv or a json file." if args.data_file_extension is None: a__ : str = extension else: assert extension == args.data_file_extension, F"`{key}_file` should be a {args.data_file_extension} file`." assert ( args.eval_metric in datasets.list_metrics() ), F"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}." # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed ) logger.info("Creating the initial data directory for self-training..." 
) a__ : Optional[int] = F"{args.output_dir}/self-train_iter-{{}}".format a__ : str = data_dir_format(0 ) if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir , exist_ok=lowerCAmelCase__ ) os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ ) accelerator.wait_for_everyone() a__ : Dict = None a__ : Dict = None a__ : List[str] = 0 a__ : int = False # Show the progress bar a__ : Union[str, Any] = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process ) # Self-train for iteration in range(0 , int(args.max_selftrain_iterations ) ): a__ : List[Any] = data_dir_format(lowerCAmelCase__ ) assert os.path.exists(lowerCAmelCase__ ) # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for # iteration > 0 a__ : Tuple = os.path.join(lowerCAmelCase__ , "stage-1" ) a__ : int = { "accelerator": accelerator, "model_name_or_path": args.model_name_or_path, "cache_dir": args.cache_dir, "do_train": True, "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"], "do_eval": True if args.eval_file is not None else False, "eval_file": data_files["eval"], "do_predict": True, "infer_file": data_files["infer"], "task_name": args.task_name, "label_list": args.label_list, "output_dir": current_output_dir, "eval_metric": args.eval_metric, "evaluation_strategy": args.evaluation_strategy, "early_stopping_patience": args.early_stopping_patience, "early_stopping_threshold": args.early_stopping_threshold, "seed": args.seed, } # Add additional training arguments for key, value in kwargs.items(): if key not in arguments_dict and not hasattr(lowerCAmelCase__ , lowerCAmelCase__ ): arguments_dict.update({key: value} ) a__ : Dict = os.path.join(lowerCAmelCase__ , "best-checkpoint" , lowerCAmelCase__ ) if os.path.exists(lowerCAmelCase__ ): logger.info( "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1." 
, lowerCAmelCase__ , lowerCAmelCase__ , ) else: logger.info("***** Running self-training: iteration: %d, stage: 1 *****" , lowerCAmelCase__ ) finetune(**lowerCAmelCase__ ) accelerator.wait_for_everyone() assert os.path.exists(lowerCAmelCase__ ) logger.info("Self-training job completed: iteration: %d, stage: 1." , lowerCAmelCase__ ) if iteration > 0 and args.finetune_on_labeled_data: # Stage 2 (optional): fine-tuning on the original labeled data a__ : int = os.path.join(lowerCAmelCase__ , "best-checkpoint" ) a__ : str = os.path.join(lowerCAmelCase__ , "stage-2" ) # Update arguments_dict a__ : Optional[int] = model_path a__ : Optional[int] = data_files["train"] a__ : int = current_output_dir a__ : Any = os.path.join(lowerCAmelCase__ , "best-checkpoint" , lowerCAmelCase__ ) if os.path.exists(lowerCAmelCase__ ): logger.info( "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2." , lowerCAmelCase__ , lowerCAmelCase__ , ) else: logger.info("***** Running self-training: iteration: %d, stage: 2 *****" , lowerCAmelCase__ ) finetune(**lowerCAmelCase__ ) accelerator.wait_for_everyone() assert os.path.exists(lowerCAmelCase__ ) logger.info("Self-training job completed: iteration: %d, stage: 2." , lowerCAmelCase__ ) a__ : Any = iteration a__ : int = data_dir_format(iteration + 1 ) a__ : Any = AutoConfig.from_pretrained(os.path.join(lowerCAmelCase__ , "best-checkpoint" ) ) a__ : Optional[Any] = config.idalabel a__ : List[str] = os.path.join(lowerCAmelCase__ , "eval_results_best-checkpoint.json" ) a__ : Dict = os.path.join(lowerCAmelCase__ , "test_results_best-checkpoint.json" ) assert os.path.exists(lowerCAmelCase__ ) with open(lowerCAmelCase__ , "r" ) as f: a__ : List[str] = float(json.load(lowerCAmelCase__ )[args.eval_metric] ) a__ : Any = os.path.join(lowerCAmelCase__ , "infer_output_best-checkpoint.csv" ) assert os.path.exists(lowerCAmelCase__ ) # Loading the dataset from local csv or json files. 
a__ : List[Any] = load_dataset(args.data_file_extension , data_files={"data": data_files["infer"]} )["data"] a__ : str = load_dataset("csv" , data_files={"data": infer_output_file} )["data"] if accelerator.is_main_process: os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ ) shutil.copy(lowerCAmelCase__ , os.path.join(lowerCAmelCase__ , F"eval_results_iter-{iteration}.json" ) ) if os.path.exists(lowerCAmelCase__ ): shutil.copy(lowerCAmelCase__ , os.path.join(lowerCAmelCase__ , F"test_results_iter-{iteration}.json" ) ) create_pseudo_labeled_data(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) accelerator.wait_for_everyone() a__ : List[str] = os.path.join(lowerCAmelCase__ , F"train_pseudo.{args.data_file_extension}" ) if args.evaluation_strategy != IntervalStrategy.NO.value: a__ : Optional[int] = eval_result if best_iteration is None: a__ : str = new_iteration a__ : Dict = new_eval_result else: if new_eval_result - best_eval_result > args.early_stopping_threshold: a__ : List[Any] = new_iteration a__ : int = new_eval_result a__ : str = 0 else: if new_eval_result == best_eval_result: a__ : Union[str, Any] = new_iteration a__ : str = new_eval_result early_stopping_patience_counter += 1 if early_stopping_patience_counter >= args.early_stopping_patience: a__ : List[Any] = True progress_bar.update(1 ) if should_training_stop: break if best_iteration is not None: # Save the best iteration logger.info("Best iteration: %d" , lowerCAmelCase__ ) logger.info("Best evaluation result: %s = %f" , args.eval_metric , lowerCAmelCase__ ) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(lowerCAmelCase__ , F"eval_results_iter-{iteration}.json" ) , os.path.join(lowerCAmelCase__ , "eval_results_best-iteration.json" ) , ) else: # Assume that the last iteration is the best logger.info("Best iteration: %d" , args.max_selftrain_iterations - 1 ) logger.info("Best evaluation result: 
%s = %f" , args.eval_metric , lowerCAmelCase__ ) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(lowerCAmelCase__ , F"eval_results_iter-{args.max_selftrain_iterations - 1}.json" ) , os.path.join(lowerCAmelCase__ , "eval_results_best-iteration.json" ) , )
251
"""Arrow-based JSON / JSON-Lines dataset loader (mangled copy of `datasets`' packaged json module)."""
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional

import pyarrow as pa
import pyarrow.json as paj

import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline


# NOTE(review): the logger and BOTH classes below share the mangled name
# `__UpperCAmelCase`, so each definition shadows the previous one. Method bodies
# still refer to the upstream names (`logger`, `JsonConfig`, `data_files`,
# `dl_manager`, `pa_table`, ...), which no longer exist under those names —
# this module cannot run as written. Comments below describe the upstream intent.
__UpperCAmelCase = datasets.utils.logging.get_logger(__name__)


@dataclass
class __UpperCAmelCase ( datasets.BuilderConfig ):
    """BuilderConfig for the JSON loader.

    All fields were renamed to the same identifier by the mangling; upstream
    they are (in order): features, encoding, encoding_errors, field,
    use_threads (deprecated), block_size (deprecated), chunksize,
    newlines_in_values — TODO confirm against upstream `datasets`.
    """

    __lowerCamelCase : Optional[datasets.Features] = None
    __lowerCamelCase : str = "utf-8"
    __lowerCamelCase : Optional[str] = None
    __lowerCamelCase : Optional[str] = None
    __lowerCamelCase : bool = True  # deprecated
    __lowerCamelCase : Optional[int] = None  # deprecated
    __lowerCamelCase : int = 10 << 20  # 10MB
    __lowerCamelCase : Optional[bool] = None


class __UpperCAmelCase ( datasets.ArrowBasedBuilder ):
    # NOTE(review): `JsonConfig` is undefined here (the dataclass above lost that name).
    __lowerCamelCase : Dict = JsonConfig

    def UpperCAmelCase ( self : Tuple ) -> Dict:
        """Warn about deprecated config options and build the DatasetInfo."""
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead" )
            # deprecated alias: fall back to block_size as the chunk size
            a__ : Union[str, Any] = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported" )
        return datasets.DatasetInfo(features=self.config.features )

    def UpperCAmelCase ( self : str , a_ : List[str] ) -> List[str]:
        """Resolve/download the configured data files and emit one SplitGenerator per split."""
        if not self.config.data_files:
            raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}" )
        a__ : List[str] = dl_manager.download_and_extract(self.config.data_files )
        # Single split: a bare path or list of paths maps to the TRAIN split.
        if isinstance(a_ , (str, list, tuple) ):
            a__ : List[Any] = data_files
            if isinstance(a_ , a_ ):
                a__ : List[Any] = [files]
            a__ : List[str] = [dl_manager.iter_files(a_ ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
        # Otherwise a mapping split_name -> files.
        a__ : Tuple = []
        for split_name, files in data_files.items():
            if isinstance(a_ , a_ ):
                a__ : List[str] = [files]
            a__ : int = [dl_manager.iter_files(a_ ) for file in files]
            splits.append(datasets.SplitGenerator(name=a_ , gen_kwargs={"files": files} ) )
        return splits

    def UpperCAmelCase ( self : Tuple , a_ : pa.Table ) -> pa.Table:
        """Cast an Arrow table to the configured features, adding all-null missing columns first."""
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features ) - set(pa_table.column_names ):
                a__ : Optional[int] = self.config.features.arrow_schema.field(a_ ).type
                a__ : int = pa_table.append_column(a_ , pa.array([None] * len(a_ ) , type=a_ ) )
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            a__ : Optional[int] = table_cast(a_ , self.config.features.arrow_schema )
        return pa_table

    def UpperCAmelCase ( self : Union[str, Any] , a_ : Dict ) -> Any:
        """Yield (key, pa.Table) pairs from JSON files.

        Two modes: a single JSON document narrowed to `config.field`, or
        JSON-Lines parsed chunk-by-chunk with pyarrow, growing the block size
        on "straddling object" errors and falling back to `json.load` when
        pyarrow cannot parse the file at all.
        """
        for file_idx, file in enumerate(itertools.chain.from_iterable(a_ ) ):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(a_ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                    a__ : Dict = json.load(a_ )
                # We keep only the field we are interested in
                a__ : Tuple = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(a_ , (list, tuple) ):
                    a__ : int = set().union(*[row.keys() for row in dataset] )
                    a__ : str = {col: [row.get(a_ ) for row in dataset] for col in keys}
                else:
                    a__ : List[str] = dataset
                a__ : List[str] = pa.Table.from_pydict(a_ )
                yield file_idx, self._cast_table(a_ )
            # If the file has one json object per line
            else:
                with open(a_ , "rb" ) as f:
                    a__ : Union[str, Any] = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    a__ : List[str] = max(self.config.chunksize // 32 , 16 << 10 )
                    a__ : str = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        a__ : List[str] = f.read(self.config.chunksize )
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(a_ )
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            a__ : str = batch.decode(self.config.encoding , errors=a_ ).encode("utf-8" )
                        try:
                            while True:
                                try:
                                    a__ : List[str] = paj.read_json(
                                        io.BytesIO(a_ ) , read_options=paj.ReadOptions(block_size=a_ )
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(a_ , pa.ArrowInvalid )
                                        and "straddling" not in str(a_ )
                                        or block_size > len(a_ )
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            F"Batch of {len(a_ )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            # pyarrow failed outright: retry the whole file as plain JSON.
                            try:
                                with open(
                                    a_ , encoding=self.config.encoding , errors=self.config.encoding_errors
                                ) as f:
                                    a__ : Any = json.load(a_ )
                            except json.JSONDecodeError:
                                logger.error(F"Failed to read file '{file}' with error {type(a_ )}: {e}" )
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(a_ , a_ ):  # list is the only sequence type supported in JSON
                                try:
                                    a__ : Optional[int] = set().union(*[row.keys() for row in dataset] )
                                    a__ : int = {col: [row.get(a_ ) for row in dataset] for col in keys}
                                    a__ : int = pa.Table.from_pydict(a_ )
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(F"Failed to read file '{file}' with error {type(a_ )}: {e}" )
                                    raise ValueError(F"Not able to read records in the JSON file at {file}." ) from None
                                yield file_idx, self._cast_table(a_ )
                                break
                            else:
                                logger.error(F"Failed to read file '{file}' with error {type(a_ )}: {e}" )
                                raise ValueError(
                                    F"Not able to read records in the JSON file at {file}. "
                                    F"You should probably indicate the field of the JSON file containing your records. "
                                    F"This JSON file contain the following fields: {str(list(dataset.keys() ) )}. "
                                    F"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(a_ )
                        batch_idx += 1
251
1
'''Unit tests for `HfArgumentParser` (mangled copy of transformers' test_hf_argparser).'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional

import yaml

from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool


# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
UpperCAmelCase__ :List[Any] = sys.version_info >= (3, 10)


# NOTE(review): mangled helper — both parameters share the name `_lowercase`
# (a SyntaxError) and `default` is an undefined name; upstream this is
# `list_field(default=None, metadata=None)` returning a dataclass list field.
def __lowercase (_lowercase=None, _lowercase=None ) -> List[Any]:
    """Return a dataclass `field` whose default_factory produces `default`."""
    return field(default_factory=lambda: default, metadata=_lowercase )


# NOTE(review): every dataclass/enum below was renamed to the SAME identifier
# `SCREAMING_SNAKE_CASE`, so each definition shadows the previous one, and the
# enum bases / field factories refer to undefined names (`lowerCAmelCase_`,
# `list_field`). The test methods still use the upstream names (BasicExample,
# BasicEnum, MixedTypeEnum, WithDefaultBoolExample, OptionalExample, ...),
# which no longer exist — this module cannot run as written.
@dataclass
class SCREAMING_SNAKE_CASE:
    # upstream BasicExample: required foo(int), bar(float), baz(str), flag(bool)
    snake_case__ : int
    snake_case__ : float
    snake_case__ : str
    snake_case__ : bool


@dataclass
class SCREAMING_SNAKE_CASE:
    # upstream WithDefaultExample: foo=42, baz="toto" with a help message
    snake_case__ : int = 4_2
    snake_case__ : str = field(default='toto' , metadata={'help': 'help message'} )


@dataclass
class SCREAMING_SNAKE_CASE:
    # upstream WithDefaultBoolExample
    snake_case__ : bool = False
    snake_case__ : bool = True
    snake_case__ : Optional[bool] = None


class SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
    # upstream BasicEnum(Enum) — base name is undefined after mangling
    snake_case__ : str = 'titi'
    snake_case__ : Dict = 'toto'


class SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
    # upstream MixedTypeEnum(Enum) with an int member (fourtytwo = 42)
    snake_case__ : List[Any] = 'titi'
    snake_case__ : Dict = 'toto'
    snake_case__ : Any = 4_2


@dataclass
class SCREAMING_SNAKE_CASE:
    snake_case__ : BasicEnum = "toto"

    def a_ ( self : int ):
        """__post_init__: coerce the string default into a BasicEnum member."""
        __lowerCamelCase : Union[str, Any] = BasicEnum(self.foo )


@dataclass
class SCREAMING_SNAKE_CASE:
    snake_case__ : MixedTypeEnum = "toto"

    def a_ ( self : Any ):
        """__post_init__: coerce the default into a MixedTypeEnum member."""
        __lowerCamelCase : Any = MixedTypeEnum(self.foo )


@dataclass
class SCREAMING_SNAKE_CASE:
    # upstream OptionalExample: every field optional, lists default to []
    snake_case__ : Optional[int] = None
    snake_case__ : Optional[float] = field(default=lowerCAmelCase_ , metadata={'help': 'help message'} )
    snake_case__ : Optional[str] = None
    snake_case__ : Optional[List[str]] = list_field(default=[] )
    snake_case__ : Optional[List[int]] = list_field(default=[] )


@dataclass
class SCREAMING_SNAKE_CASE:
    # upstream ListExample: list-typed fields with and without defaults
    snake_case__ : List[int] = list_field(default=[] )
    snake_case__ : List[int] = list_field(default=[1, 2, 3] )
    snake_case__ : List[str] = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
    snake_case__ : List[float] = list_field(default=[0.1, 0.2, 0.3] )


@dataclass
class SCREAMING_SNAKE_CASE:
    # upstream RequiredExample: fields declared with bare field() are required
    snake_case__ : List[int] = field()
    snake_case__ : str = field()
    snake_case__ : BasicEnum = field()

    def a_ ( self : Optional[int] ):
        """__post_init__: coerce required_enum to a BasicEnum member."""
        __lowerCamelCase : Any = BasicEnum(self.required_enum )


@dataclass
class SCREAMING_SNAKE_CASE:
    # upstream StringLiteralAnnotationExample: annotations given as strings
    snake_case__ : int
    snake_case__ : "BasicEnum" = field()
    snake_case__ : "Optional[bool]" = None
    snake_case__ : "str" = field(default='toto' , metadata={'help': 'help message'} )
    snake_case__ : "List[str]" = list_field(default=['Hallo', 'Bonjour', 'Hello'] )


if is_python_no_less_than_3_10:
    # PEP 604 (`X | None`) variants, only definable on Python >= 3.10
    @dataclass
    class SCREAMING_SNAKE_CASE:
        snake_case__ : bool = False
        snake_case__ : bool = True
        snake_case__ : bool | None = None

    @dataclass
    class SCREAMING_SNAKE_CASE:
        snake_case__ : int | None = None
        snake_case__ : float | None = field(default=lowerCAmelCase_ , metadata={'help': 'help message'} )
        snake_case__ : str | None = None
        snake_case__ : list[str] | None = list_field(default=[] )
        snake_case__ : list[int] | None = list_field(default=[] )


# NOTE(review): every test method below is also named `a_`, so only the last
# definition survives on the class; several methods duplicate parameter names
# (`A__`), which is a SyntaxError. Upstream names are given in the docstrings.
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    def a_ ( self : Any , A__ : argparse.ArgumentParser , A__ : argparse.ArgumentParser ):
        """Upstream `argparsersEqual`: assert two argparse parsers define the same actions."""
        self.assertEqual(len(a._actions ) , len(b._actions ) )
        for x, y in zip(a._actions , b._actions ):
            __lowerCamelCase : Tuple = {k: v for k, v in vars(A__ ).items() if k != """container"""}
            __lowerCamelCase : str = {k: v for k, v in vars(A__ ).items() if k != """container"""}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("""choices""" , A__ ) and yy.get("""choices""" , A__ ):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["""type"""](A__ ) , yy["""type"""](A__ ) )
                del xx["type"], yy["type"]
            self.assertEqual(A__ , A__ )

    def a_ ( self : List[str] ):
        """Upstream `test_basic`: required typed fields become required CLI args."""
        __lowerCamelCase : Optional[Any] = HfArgumentParser(A__ )
        __lowerCamelCase : int = argparse.ArgumentParser()
        expected.add_argument("""--foo""" , type=A__ , required=A__ )
        expected.add_argument("""--bar""" , type=A__ , required=A__ )
        expected.add_argument("""--baz""" , type=A__ , required=A__ )
        expected.add_argument("""--flag""" , type=A__ , default=A__ , const=A__ , nargs="""?""" )
        self.argparsersEqual(A__ , A__ )
        __lowerCamelCase : Optional[Any] = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""]
        ((__lowerCamelCase) , ) : Tuple = parser.parse_args_into_dataclasses(A__ , look_for_args_file=A__ )
        self.assertFalse(example.flag )

    def a_ ( self : Union[str, Any] ):
        """Upstream `test_with_default`: dataclass defaults become argparse defaults."""
        __lowerCamelCase : int = HfArgumentParser(A__ )
        __lowerCamelCase : int = argparse.ArgumentParser()
        expected.add_argument("""--foo""" , default=42 , type=A__ )
        expected.add_argument("""--baz""" , default="""toto""" , type=A__ , help="""help message""" )
        self.argparsersEqual(A__ , A__ )

    def a_ ( self : Dict ):
        """Upstream `test_with_default_bool`: bool fields get --flag/--no_flag handling."""
        __lowerCamelCase : Tuple = argparse.ArgumentParser()
        expected.add_argument("""--foo""" , type=A__ , default=A__ , const=A__ , nargs="""?""" )
        expected.add_argument("""--baz""" , type=A__ , default=A__ , const=A__ , nargs="""?""" )
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("""--no_baz""" , action="""store_false""" , default=A__ , dest="""baz""" )
        expected.add_argument("""--opt""" , type=A__ , default=A__ )
        __lowerCamelCase : Union[str, Any] = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(A__ )
        for dataclass_type in dataclass_types:
            __lowerCamelCase : List[str] = HfArgumentParser(A__ )
            self.argparsersEqual(A__ , A__ )
            __lowerCamelCase : Optional[Any] = parser.parse_args([] )
            self.assertEqual(A__ , Namespace(foo=A__ , baz=A__ , opt=A__ ) )
            __lowerCamelCase : Optional[int] = parser.parse_args(["""--foo""", """--no_baz"""] )
            self.assertEqual(A__ , Namespace(foo=A__ , baz=A__ , opt=A__ ) )
            __lowerCamelCase : Any = parser.parse_args(["""--foo""", """--baz"""] )
            self.assertEqual(A__ , Namespace(foo=A__ , baz=A__ , opt=A__ ) )
            __lowerCamelCase : int = parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] )
            self.assertEqual(A__ , Namespace(foo=A__ , baz=A__ , opt=A__ ) )
            __lowerCamelCase : List[Any] = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] )
            self.assertEqual(A__ , Namespace(foo=A__ , baz=A__ , opt=A__ ) )

    def a_ ( self : Any ):
        """Upstream `test_with_enum`: enum fields map to string/int choices."""
        __lowerCamelCase : Any = HfArgumentParser(A__ )
        __lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
        expected.add_argument(
            """--foo""" ,
            default="""toto""" ,
            choices=["""titi""", """toto""", 42] ,
            type=make_choice_type_function(["""titi""", """toto""", 42] ) ,
        )
        self.argparsersEqual(A__ , A__ )
        __lowerCamelCase : List[Any] = parser.parse_args([] )
        self.assertEqual(args.foo , """toto""" )
        __lowerCamelCase : List[str] = parser.parse_args_into_dataclasses([] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
        __lowerCamelCase : Any = parser.parse_args(["""--foo""", """titi"""] )
        self.assertEqual(args.foo , """titi""" )
        __lowerCamelCase : List[str] = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
        __lowerCamelCase : Any = parser.parse_args(["""--foo""", """42"""] )
        self.assertEqual(args.foo , 42 )
        __lowerCamelCase : Optional[int] = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )

    def a_ ( self : Tuple ):
        """Upstream `test_with_literal`: Literal annotations behave like enum choices."""
        @dataclass
        class SCREAMING_SNAKE_CASE:
            snake_case__ : Literal["titi", "toto", 4_2] = "toto"

        __lowerCamelCase : Any = HfArgumentParser(A__ )
        __lowerCamelCase : Optional[int] = argparse.ArgumentParser()
        expected.add_argument(
            """--foo""" ,
            default="""toto""" ,
            choices=("""titi""", """toto""", 42) ,
            type=make_choice_type_function(["""titi""", """toto""", 42] ) ,
        )
        self.argparsersEqual(A__ , A__ )
        __lowerCamelCase : List[str] = parser.parse_args([] )
        self.assertEqual(args.foo , """toto""" )
        __lowerCamelCase : Optional[Any] = parser.parse_args(["""--foo""", """titi"""] )
        self.assertEqual(args.foo , """titi""" )
        __lowerCamelCase : Any = parser.parse_args(["""--foo""", """42"""] )
        self.assertEqual(args.foo , 42 )

    def a_ ( self : Optional[Any] ):
        """Upstream `test_with_list`: list fields become nargs='+' arguments."""
        __lowerCamelCase : Dict = HfArgumentParser(A__ )
        __lowerCamelCase : Any = argparse.ArgumentParser()
        expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=A__ )
        expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=A__ )
        expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=A__ )
        expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=A__ )
        self.argparsersEqual(A__ , A__ )
        __lowerCamelCase : List[str] = parser.parse_args([] )
        self.assertEqual(
            A__ ,
            Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3] ) ,
        )
        __lowerCamelCase : Optional[int] = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() )
        self.assertEqual(A__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7] ) )

    def a_ ( self : int ):
        """Upstream `test_with_optional`: Optional fields default to None / []."""
        __lowerCamelCase : List[str] = argparse.ArgumentParser()
        expected.add_argument("""--foo""" , default=A__ , type=A__ )
        expected.add_argument("""--bar""" , default=A__ , type=A__ , help="""help message""" )
        expected.add_argument("""--baz""" , default=A__ , type=A__ )
        expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=A__ )
        expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=A__ )
        __lowerCamelCase : Tuple = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(A__ )
        for dataclass_type in dataclass_types:
            __lowerCamelCase : Dict = HfArgumentParser(A__ )
            self.argparsersEqual(A__ , A__ )
            __lowerCamelCase : List[Any] = parser.parse_args([] )
            self.assertEqual(A__ , Namespace(foo=A__ , bar=A__ , baz=A__ , ces=[] , des=[] ) )
            __lowerCamelCase : str = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() )
            self.assertEqual(A__ , Namespace(foo=12 , bar=3.14 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3] ) )

    def a_ ( self : Any ):
        """Upstream `test_with_required`: bare field() fields are required args."""
        __lowerCamelCase : Optional[Any] = HfArgumentParser(A__ )
        __lowerCamelCase : Dict = argparse.ArgumentParser()
        expected.add_argument("""--required_list""" , nargs="""+""" , type=A__ , required=A__ )
        expected.add_argument("""--required_str""" , type=A__ , required=A__ )
        expected.add_argument(
            """--required_enum""" ,
            type=make_choice_type_function(["""titi""", """toto"""] ) ,
            choices=["""titi""", """toto"""] ,
            required=A__ ,
        )
        self.argparsersEqual(A__ , A__ )

    def a_ ( self : List[Any] ):
        """Upstream `test_with_string_literal_annotation`: string annotations are resolved."""
        __lowerCamelCase : Optional[Any] = HfArgumentParser(A__ )
        __lowerCamelCase : List[str] = argparse.ArgumentParser()
        expected.add_argument("""--foo""" , type=A__ , required=A__ )
        expected.add_argument(
            """--required_enum""" ,
            type=make_choice_type_function(["""titi""", """toto"""] ) ,
            choices=["""titi""", """toto"""] ,
            required=A__ ,
        )
        expected.add_argument("""--opt""" , type=A__ , default=A__ )
        expected.add_argument("""--baz""" , default="""toto""" , type=A__ , help="""help message""" )
        expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=A__ )
        self.argparsersEqual(A__ , A__ )

    def a_ ( self : Dict ):
        """Upstream `test_parse_dict`: a plain dict parses into the dataclass."""
        __lowerCamelCase : Tuple = HfArgumentParser(A__ )
        __lowerCamelCase : Optional[int] = {
            """foo""": 12,
            """bar""": 3.14,
            """baz""": """42""",
            """flag""": True,
        }
        __lowerCamelCase : Tuple = parser.parse_dict(A__ )[0]
        __lowerCamelCase : Dict = BasicExample(**A__ )
        self.assertEqual(A__ , A__ )

    def a_ ( self : Union[str, Any] ):
        """Upstream `test_parse_dict_extra_key`: unknown keys raise unless allowed."""
        __lowerCamelCase : Optional[int] = HfArgumentParser(A__ )
        __lowerCamelCase : int = {
            """foo""": 12,
            """bar""": 3.14,
            """baz""": """42""",
            """flag""": True,
            """extra""": 42,
        }
        self.assertRaises(A__ , parser.parse_dict , A__ , allow_extra_keys=A__ )

    def a_ ( self : List[Any] ):
        """Upstream `test_parse_json`: round-trip via a temporary .json file."""
        __lowerCamelCase : Tuple = HfArgumentParser(A__ )
        __lowerCamelCase : List[str] = {
            """foo""": 12,
            """bar""": 3.14,
            """baz""": """42""",
            """flag""": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            __lowerCamelCase : Dict = os.path.join(A__ , """temp_json""" )
            os.mkdir(A__ )
            with open(temp_local_path + """.json""" , """w+""" ) as f:
                json.dump(A__ , A__ )
            # NOTE(review): parses the .json file via parse_yaml_file (YAML is a
            # superset of JSON) — confirm this matches the upstream test intent.
            __lowerCamelCase : Any = parser.parse_yaml_file(Path(temp_local_path + """.json""" ) )[0]
            __lowerCamelCase : Optional[Any] = BasicExample(**A__ )
            self.assertEqual(A__ , A__ )

    def a_ ( self : int ):
        """Upstream `test_parse_yaml`: round-trip via a temporary .yaml file."""
        __lowerCamelCase : List[Any] = HfArgumentParser(A__ )
        __lowerCamelCase : str = {
            """foo""": 12,
            """bar""": 3.14,
            """baz""": """42""",
            """flag""": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            __lowerCamelCase : Optional[Any] = os.path.join(A__ , """temp_yaml""" )
            os.mkdir(A__ )
            with open(temp_local_path + """.yaml""" , """w+""" ) as f:
                yaml.dump(A__ , A__ )
            __lowerCamelCase : List[Any] = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0]
            __lowerCamelCase : Dict = BasicExample(**A__ )
            self.assertEqual(A__ , A__ )

    def a_ ( self : Dict ):
        """Upstream `test_integration_training_args`: TrainingArguments builds a parser."""
        __lowerCamelCase : int = HfArgumentParser(A__ )
        self.assertIsNotNone(A__ )
150
'''WavLM model configuration (restored from a mangled copy).'''
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


UpperCAmelCase__ :Optional[int] = logging.get_logger(__name__)

# NOTE(review): this second assignment shadows the logger above (both were
# mangled to the same name); kept as-is since the logger is unused below.
UpperCAmelCase__ :Union[str, Any] = {
    """microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}


class SCREAMING_SNAKE_CASE ( PretrainedConfig ):  # base was the undefined name `lowerCAmelCase_`
    """Configuration for a WavLM model; defaults mirror microsoft/wavlm-base.

    Fixes over the mangled original: every `__init__` parameter was named `A__`
    (duplicate parameter names are a SyntaxError) and every attribute assignment
    had been turned into an annotated local (`__lowerCamelCase : ... = x`), so
    no configuration field was ever set on the instance. Parameter names and
    assignment order are restored from the upstream WavLM configuration.
    """

    snake_case__ : int = 'wavlm'  # kept for backward compatibility with the mangled attribute
    model_type = 'wavlm'  # the name the PretrainedConfig machinery actually reads

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        # The three conv_* lists describe the same stack of feature-extractor
        # layers, so they must have equal length.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                """Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
                """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
                f" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel )}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def a_ ( self ):
        """Overall stride of the conv feature extractor (product of conv_stride)."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
150
1
"""Numerical ODE solver using Heun's method (the explicit trapezoidal rule)."""
from collections.abc import Callable

import numpy as np

# The mangled public name starts with an underscore; export it explicitly so
# `from module import *` keeps working for existing users.
__all__ = ["__lowerCAmelCase"]


def __lowerCAmelCase(
    ode_func: Callable,
    ya: float,
    xa: float,
    x_end: float,
    step_size: float,
) -> np.ndarray:
    """Integrate y' = ode_func(x, y) from ``xa`` to ``x_end`` with Heun's method.

    Heun's method is the two-stage predictor/corrector scheme
        y*      = y_k + h * f(x_k, y_k)                       (Euler predictor)
        y_{k+1} = y_k + h/2 * (f(x_k, y_k) + f(x_k + h, y*))  (trapezoid corrector)

    Args:
        ode_func: right-hand side f(x, y) of the ODE.
        ya: initial value y(xa).
        xa: initial abscissa.
        x_end: final abscissa; ceil((x_end - xa) / step_size) steps are taken.
        step_size: positive step h.

    Returns:
        numpy array of the n + 1 solution values y_0 .. y_n.

    >>> float(__lowerCAmelCase(lambda x, y: x, 0.0, 0.0, 1.0, 0.5)[-1])
    0.5
    """
    # Fixes over the mangled original: all five parameters were named
    # `lowercase` (a SyntaxError), the body referenced the undefined name
    # `lowerCamelCase_`, the initial value was never stored into y[0], and the
    # return annotation claimed `str` although an ndarray is returned.
    n = int(np.ceil((x_end - xa) / step_size))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        # Euler predictor, then trapezoidal corrector using the predicted slope.
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        x += step_size
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
702
"""simple docstring""" import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class _lowerCAmelCase ( snake_case_ ): def lowerCamelCase ( self ) -> Any: '''simple docstring''' snake_case : List[Any] = tempfile.mkdtemp() snake_case : Union[str, Any] = 8 # DPR tok snake_case : str = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] snake_case : Union[str, Any] = os.path.join(self.tmpdirname , "dpr_tokenizer" ) os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ ) snake_case : Union[str, Any] = os.path.join(UpperCamelCase__ , DPR_VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) # BART tok snake_case : Union[str, Any] = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] snake_case : Tuple 
= dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) ) snake_case : List[Any] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] snake_case : Optional[Any] = {"unk_token": "<unk>"} snake_case : Tuple = os.path.join(self.tmpdirname , "bart_tokenizer" ) os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ ) snake_case : Optional[Any] = os.path.join(UpperCamelCase__ , BART_VOCAB_FILES_NAMES["vocab_file"] ) snake_case : int = os.path.join(UpperCamelCase__ , BART_VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(UpperCamelCase__ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(UpperCamelCase__ ) ) def lowerCamelCase ( self ) -> DPRQuestionEncoderTokenizer: '''simple docstring''' return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) ) def lowerCamelCase ( self ) -> DPRContextEncoderTokenizer: '''simple docstring''' return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) ) def lowerCamelCase ( self ) -> BartTokenizer: '''simple docstring''' return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) ) def lowerCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def lowerCamelCase ( self ) -> Dict: '''simple docstring''' snake_case : Dict = Dataset.from_dict( { "id": ["0", "1"], "text": ["foo", "bar"], "title": ["Foo", "Bar"], "embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )], } ) dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT ) return dataset def lowerCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' snake_case : List[str] = self.get_dummy_dataset() snake_case : int = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , 
generator=BartConfig().to_dict() , ) with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset: snake_case : int = dataset snake_case : int = RagRetriever( UpperCamelCase__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) return retriever def lowerCamelCase ( self , UpperCamelCase__ ) -> Optional[Any]: '''simple docstring''' snake_case : List[str] = self.get_dummy_dataset() snake_case : Any = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , ) if from_disk: snake_case : str = os.path.join(self.tmpdirname , "dataset" ) snake_case : Any = os.path.join(self.tmpdirname , "index.faiss" ) dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname , "index.faiss" ) ) dataset.drop_index("embeddings" ) dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset" ) ) del dataset snake_case : Any = RagRetriever( UpperCamelCase__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) else: snake_case : str = RagRetriever( UpperCamelCase__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , UpperCamelCase__ ) , ) return retriever def lowerCamelCase ( self ) -> Optional[Any]: '''simple docstring''' snake_case : Optional[int] = Dataset.from_dict( { "id": ["0", "1"], "text": ["foo", "bar"], "title": ["Foo", "Bar"], "embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )], } ) dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT ) snake_case : Dict = os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index" ) dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr" ) pickle.dump(dataset["id"] , 
open(index_file_name + ".index_meta.dpr" , "wb" ) ) snake_case : int = os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl" ) snake_case : Any = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset} pickle.dump(UpperCamelCase__ , open(UpperCamelCase__ , "wb" ) ) snake_case : List[Any] = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , ) snake_case : Dict = RagRetriever( UpperCamelCase__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() ) return retriever def lowerCamelCase ( self ) -> str: '''simple docstring''' snake_case : str = 1 snake_case : Any = self.get_dummy_canonical_hf_index_retriever() snake_case : Dict = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) snake_case ,snake_case ,snake_case : Any = retriever.retrieve(UpperCamelCase__ , n_docs=UpperCamelCase__ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(UpperCamelCase__ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] ) self.assertEqual(len(doc_dicts[0]["id"] ) , UpperCamelCase__ ) self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def lowerCamelCase ( self ) -> List[str]: '''simple docstring''' snake_case : List[str] = self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset: snake_case : List[str] = self.get_dummy_dataset() retriever.save_pretrained(UpperCamelCase__ ) snake_case : Union[str, Any] = 
RagRetriever.from_pretrained(UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) snake_case : Union[str, Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) snake_case : List[Any] = retriever.retrieve(UpperCamelCase__ , n_docs=1 ) self.assertTrue(out is not None ) def lowerCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' snake_case : Dict = 1 snake_case : Union[str, Any] = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCamelCase__ ) snake_case : int = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) snake_case ,snake_case ,snake_case : Any = retriever.retrieve(UpperCamelCase__ , n_docs=UpperCamelCase__ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(UpperCamelCase__ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] ) self.assertEqual(len(doc_dicts[0]["id"] ) , UpperCamelCase__ ) self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def lowerCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' snake_case : Dict = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCamelCase__ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(UpperCamelCase__ ) snake_case : int = RagRetriever.from_pretrained(UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) snake_case : Optional[int] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) snake_case : int = retriever.retrieve(UpperCamelCase__ , n_docs=1 ) self.assertTrue(out is not None ) def lowerCamelCase ( self ) -> Optional[Any]: '''simple docstring''' 
snake_case : List[str] = 1 snake_case : int = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCamelCase__ ) snake_case : int = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) snake_case ,snake_case ,snake_case : List[str] = retriever.retrieve(UpperCamelCase__ , n_docs=UpperCamelCase__ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(UpperCamelCase__ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] ) self.assertEqual(len(doc_dicts[0]["id"] ) , UpperCamelCase__ ) self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def lowerCamelCase ( self ) -> Tuple: '''simple docstring''' snake_case : Tuple = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCamelCase__ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(UpperCamelCase__ ) snake_case : Any = RagRetriever.from_pretrained(UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) snake_case : Optional[Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) snake_case : Any = retriever.retrieve(UpperCamelCase__ , n_docs=1 ) self.assertTrue(out is not None ) def lowerCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' snake_case : Union[str, Any] = 1 snake_case : Tuple = self.get_dummy_legacy_index_retriever() snake_case : str = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) snake_case ,snake_case ,snake_case : Any = retriever.retrieve(UpperCamelCase__ , n_docs=UpperCamelCase__ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) 
self.assertEqual(len(UpperCamelCase__ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["text", "title"] ) self.assertEqual(len(doc_dicts[0]["text"] ) , UpperCamelCase__ ) self.assertEqual(doc_dicts[0]["text"][0] , "bar" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["text"][0] , "foo" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def lowerCamelCase ( self ) -> List[str]: '''simple docstring''' snake_case : int = self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(UpperCamelCase__ ) snake_case : Tuple = RagRetriever.from_pretrained(UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) snake_case : int = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) snake_case : Optional[Any] = retriever.retrieve(UpperCamelCase__ , n_docs=1 ) self.assertTrue(out is not None ) @require_torch @require_tokenizers @require_sentencepiece def lowerCamelCase ( self ) -> Optional[Any]: '''simple docstring''' import torch snake_case : str = 1 snake_case : Dict = self.get_dummy_canonical_hf_index_retriever() snake_case : str = [[5, 7], [10, 11]] snake_case : Any = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) snake_case : int = retriever(UpperCamelCase__ , UpperCamelCase__ , prefix=retriever.config.generator.prefix , n_docs=UpperCamelCase__ ) snake_case ,snake_case ,snake_case : Dict = ( out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , np.ndarray ) snake_case : Tuple = retriever( UpperCamelCase__ , 
UpperCamelCase__ , prefix=retriever.config.generator.prefix , n_docs=UpperCamelCase__ , return_tensors="pt" , ) snake_case ,snake_case ,snake_case ,snake_case : str = ( # noqa: F841 out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], out["doc_ids"], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(UpperCamelCase__ , torch.Tensor ) self.assertIsInstance(UpperCamelCase__ , torch.Tensor ) self.assertIsInstance(UpperCamelCase__ , torch.Tensor ) @require_torch @require_tokenizers @require_sentencepiece def lowerCamelCase ( self ) -> Any: '''simple docstring''' snake_case : Tuple = self.get_dpr_ctx_encoder_tokenizer() snake_case : Union[str, Any] = 1 snake_case : List[str] = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCamelCase__ ) retriever.set_ctx_encoder_tokenizer(UpperCamelCase__ ) snake_case : str = [[5, 7], [10, 11]] snake_case : str = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) snake_case : Dict = retriever(UpperCamelCase__ , UpperCamelCase__ , prefix=retriever.config.generator.prefix , n_docs=UpperCamelCase__ ) self.assertEqual( len(UpperCamelCase__ ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask") ) , UpperCamelCase__ ) # check for doc token related keys in dictionary.
117
0
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : list ): '''simple docstring''' _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) for _ in range(_SCREAMING_SNAKE_CASE ): for i in range(_ % 2 , arr_size - 1 , 2 ): if arr[i + 1] < arr[i]: _UpperCAmelCase = arr[i + 1], arr[i] return arr if __name__ == "__main__": __A : Union[str, Any] = list(range(10, 0, -1)) print(f'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
602
from typing import Optional

from torch import nn

from .transformer_ad import TransformeraDModel, TransformeraDModelOutput


class SCREAMING_SNAKE_CASE_(nn.Module):
    """Two parallel 2D-transformer blocks whose outputs are linearly mixed.

    NOTE(review): the obfuscated source gave every ``__init__`` parameter the
    same name (a SyntaxError) and stored the sub-modules in a local instead of
    on ``self`` (so ``forward`` raised AttributeError).  Parameter/attribute
    names below are restored from the usage sites in ``forward`` and from the
    upstream diffusers ``DualTransformer2DModel`` — confirm against upstream.
    """

    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ) -> None:
        super().__init__()
        # Two identically-configured transformers; which one encodes which
        # condition is decided by `transformer_index_for_condition`.
        self.transformers = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 2_57]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        """Run both transformers on ``hidden_states`` — each conditioned on its
        slice of ``encoder_hidden_states`` — and mix their residuals.

        Returns ``TransformeraDModelOutput`` (or a 1-tuple when
        ``return_dict`` is False).
        """
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            # Keep only the residual so the mix below is over deltas.
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return TransformeraDModelOutput(sample=output_states)
305
0
"""CLI command that scaffolds a new model from the ``adding_a_new_model`` cookiecutter template.

NOTE(review): identifiers in this block appear machine-mangled — many distinct
variables were collapsed into the same names (``a_``, ``UpperCAmelCase``,
``lowercase__``), so several names that are read (e.g. ``parser``,
``add_new_model_parser``, ``configuration``, ``_has_cookiecutter``) are never
bound here.  Restore the original names from upstream
``transformers/commands/add_new_model.py`` before running.
"""
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List

from ..utils import logging
from . import BaseTransformersCLICommand


try:
    # Optional dependency: file generation needs cookiecutter.
    from cookiecutter.main import cookiecutter

    lowercase__ =True
except ImportError:
    lowercase__ =False

lowercase__ =logging.get_logger(__name__)  # pylint: disable=invalid-name


def UpperCamelCase_ ( A__ ):
    """Factory handed to argparse ``set_defaults(func=...)`` to build the command."""
    return AddNewModelCommand(args.testing , args.testing_file , path=args.path )


class a_ ( UpperCamelCase__ ):
    """``transformers-cli add-new-model`` command (deprecated; see the warning in run)."""

    @staticmethod
    def lowerCAmelCase__ ( UpperCAmelCase ):
        """Register the ``add-new-model`` sub-parser and its CLI flags."""
        a_ = parser.add_parser("""add-new-model""" )
        add_new_model_parser.add_argument("""--testing""" , action="""store_true""" , help="""If in testing mode.""" )
        add_new_model_parser.add_argument("""--testing_file""" , type=UpperCAmelCase , help="""Configuration file on which to run.""" )
        add_new_model_parser.add_argument(
            """--path""" , type=UpperCAmelCase , help="""Path to cookiecutter. Should only be used for testing purposes.""" )
        add_new_model_parser.set_defaults(func=UpperCAmelCase )

    def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , *UpperCAmelCase ):
        # Store CLI options: testing-mode flag, config-file path, cookiecutter path.
        a_ = testing
        a_ = testing_file
        a_ = path

    def lowerCAmelCase__ ( self ):
        """Execute the cookiecutter template, then move the generated sources,
        tests and docs into the transformers source tree, pruning frameworks
        the user did not request."""
        warnings.warn(
            """The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. """
            """It is not actively maintained anymore, so might give a result that won't pass all tests and quality """
            """checks, you should use `transformers-cli add-new-model-like` instead.""" )
        if not _has_cookiecutter:
            raise ImportError(
                """Model creation dependencies are required to use the `add_new_model` command. Install them by running """
                """the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n""" )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        a_ = [directory for directory in os.listdir() if """cookiecutter-template-""" == directory[:22]]
        if len(UpperCAmelCase ) > 0:
            raise ValueError(
                """Several directories starting with `cookiecutter-template-` in current working directory. """
                """Please clean your directory by removing all folders starting with `cookiecutter-template-` or """
                """change your working directory.""" )
        a_ = (
            Path(UpperCAmelCase ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
        )
        a_ = path_to_transformer_root / """templates""" / """adding_a_new_model"""
        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(UpperCAmelCase ) )
        else:
            # Testing mode: feed the saved answers file instead of prompting.
            with open(self._testing_file , """r""" ) as configuration_file:
                a_ = json.load(UpperCAmelCase )
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path ) , no_input=UpperCAmelCase , extra_context=UpperCAmelCase , )
        a_ = [directory for directory in os.listdir() if """cookiecutter-template-""" in directory[:22]][0]
        # Retrieve configuration
        with open(directory + """/configuration.json""" , """r""" ) as configuration_file:
            a_ = json.load(UpperCAmelCase )
        a_ = configuration["""lowercase_modelname"""]
        a_ = configuration["""generate_tensorflow_pytorch_and_flax"""]
        os.remove(f'''{directory}/configuration.json''' )
        # Which framework flavours were requested in the cookiecutter answers.
        a_ = """PyTorch""" in generate_tensorflow_pytorch_and_flax
        a_ = """TensorFlow""" in generate_tensorflow_pytorch_and_flax
        a_ = """Flax""" in generate_tensorflow_pytorch_and_flax
        a_ = f'''{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'''
        os.makedirs(UpperCAmelCase , exist_ok=UpperCAmelCase )
        os.makedirs(f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}''' , exist_ok=UpperCAmelCase )
        # Tests require submodules as they have parent imports
        with open(f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py''' , """w""" ):
            pass
        shutil.move(
            f'''{directory}/__init__.py''' , f'''{model_dir}/__init__.py''' , )
        shutil.move(
            f'''{directory}/configuration_{lowercase_model_name}.py''' , f'''{model_dir}/configuration_{lowercase_model_name}.py''' , )

        def remove_copy_lines(UpperCAmelCase ):
            # Strip "# Copied from transformers." markers from a generated file.
            with open(UpperCAmelCase , """r""" ) as f:
                a_ = f.readlines()
            with open(UpperCAmelCase , """w""" ) as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(UpperCAmelCase )

        if output_pytorch:
            if not self._testing:
                remove_copy_lines(f'''{directory}/modeling_{lowercase_model_name}.py''' )
            shutil.move(
                f'''{directory}/modeling_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_{lowercase_model_name}.py''' , )
            shutil.move(
                f'''{directory}/test_modeling_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py''' , )
        else:
            os.remove(f'''{directory}/modeling_{lowercase_model_name}.py''' )
            os.remove(f'''{directory}/test_modeling_{lowercase_model_name}.py''' )
        if output_tensorflow:
            if not self._testing:
                remove_copy_lines(f'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
            shutil.move(
                f'''{directory}/modeling_tf_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_tf_{lowercase_model_name}.py''' , )
            shutil.move(
                f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py''' , )
        else:
            os.remove(f'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
            os.remove(f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' )
        if output_flax:
            if not self._testing:
                remove_copy_lines(f'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
            shutil.move(
                f'''{directory}/modeling_flax_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_flax_{lowercase_model_name}.py''' , )
            shutil.move(
                f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py''' , )
        else:
            os.remove(f'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
            os.remove(f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' )
        shutil.move(
            f'''{directory}/{lowercase_model_name}.md''' , f'''{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md''' , )
        shutil.move(
            f'''{directory}/tokenization_{lowercase_model_name}.py''' , f'''{model_dir}/tokenization_{lowercase_model_name}.py''' , )
        shutil.move(
            f'''{directory}/tokenization_fast_{lowercase_model_name}.py''' , f'''{model_dir}/tokenization_{lowercase_model_name}_fast.py''' , )
        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp

        def replace(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
            # Copy *lines_to_copy* below *line_to_copy_below* in a file, atomically via a temp file.
            # Create temp file
            a_ , a_ = mkstemp()
            a_ = False
            with fdopen(UpperCAmelCase , """w""" ) as new_file:
                with open(UpperCAmelCase ) as old_file:
                    for line in old_file:
                        new_file.write(UpperCAmelCase )
                        if line_to_copy_below in line:
                            a_ = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(UpperCAmelCase )
            if not line_found:
                raise ValueError(f'''Line {line_to_copy_below} was not found in file.''' )
            # Copy the file permissions from the old file to the new file
            copymode(UpperCAmelCase , UpperCAmelCase )
            # Remove original file
            remove(UpperCAmelCase )
            # Move new file
            move(UpperCAmelCase , UpperCAmelCase )

        def skip_units(UpperCAmelCase ):
            # Whether a snippet targets a framework flavour the user opted out of.
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )

        def replace_in_files(UpperCAmelCase ):
            # Parse a `to_replace_*.py` spec file and apply each snippet via `replace`.
            with open(UpperCAmelCase ) as datafile:
                a_ = []
                a_ = False
                a_ = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        a_ = line.split("""\"""" )[1]
                        a_ = skip_units(UpperCAmelCase )
                    elif "# Below: " in line and "##" not in line:
                        a_ = line.split("""\"""" )[1]
                        a_ = skip_units(UpperCAmelCase )
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
                        a_ = []
                    elif "# Replace with" in line and "##" not in line:
                        a_ = []
                    elif "##" not in line:
                        lines_to_copy.append(UpperCAmelCase )
            remove(UpperCAmelCase )

        replace_in_files(f'''{directory}/to_replace_{lowercase_model_name}.py''' )
        os.rmdir(UpperCAmelCase )
511
"""Project Euler problem 234: sum of semidivisible numbers up to a limit."""
import math


def prime_sieve(n: int) -> list:
    """Return all primes strictly below ``n`` (odd-only sieve of Eratosthenes).

    Assumes ``n >= 3`` (indices 0..2 are written unconditionally, as in the
    original).  Even numbers > 2 are never inspected, so only odd multiples
    need to be crossed off.
    """
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(limit: int = 99_99_66_66_33_33) -> int:
    """Sum all semidivisible numbers <= ``limit``.

    A number ``n`` is semidivisible when exactly one of lps(n) (largest prime
    <= sqrt(n)) and ups(n) (smallest prime >= sqrt(n)) divides it.  For each
    consecutive prime pair (p, q) the candidates lie in (p^2, q^2]; multiples
    of p and of q are each added, then multiples of p*q are removed twice
    since both branches counted them.
    """
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum


if __name__ == "__main__":
    print(solution())
511
1
import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class lowerCamelCase : def __init__( self , lowercase__ , lowercase__=1_3 , lowercase__=7 , lowercase__=True , lowercase__=True , lowercase__=9_9 , lowercase__=3_2 , lowercase__=5 , lowercase__=4 , lowercase__=3_7 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_0 , lowercase__=0.0_2 , lowercase__=True , lowercase__=None , ): __UpperCAmelCase : List[Any] = parent __UpperCAmelCase : Any = batch_size __UpperCAmelCase : List[str] = seq_length __UpperCAmelCase : Dict = is_training __UpperCAmelCase : List[Any] = use_input_mask __UpperCAmelCase : Union[str, Any] = vocab_size __UpperCAmelCase : Tuple = hidden_size __UpperCAmelCase : Optional[Any] = num_hidden_layers __UpperCAmelCase : Union[str, Any] = num_attention_heads __UpperCAmelCase : int = intermediate_size __UpperCAmelCase : Any = hidden_act __UpperCAmelCase : List[str] = hidden_dropout_prob __UpperCAmelCase : Optional[Any] = attention_probs_dropout_prob __UpperCAmelCase : str = max_position_embeddings __UpperCAmelCase : Optional[Any] = initializer_range __UpperCAmelCase : str = use_labels __UpperCAmelCase : Union[str, Any] = scope def A( self): __UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) __UpperCAmelCase : List[Any] = None if self.use_input_mask: __UpperCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length]) if self.use_labels: __UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, 
self.seq_length] , self.vocab_size) __UpperCAmelCase : Optional[Any] = self.get_config() return config, input_ids, input_mask, token_labels def A( self): return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=lowercase__ , initializer_range=self.initializer_range , ) def A( self): ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) : List[str] = self.prepare_config_and_inputs() __UpperCAmelCase : Any = True __UpperCAmelCase : Dict = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) __UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , **lowercase__ , ): __UpperCAmelCase : List[Any] = BertGenerationEncoder(config=lowercase__) model.to(lowercase__) model.eval() __UpperCAmelCase : List[str] = model(lowercase__ , attention_mask=lowercase__) __UpperCAmelCase : Union[str, Any] = model(lowercase__) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , **lowercase__ , ): __UpperCAmelCase : Union[str, Any] = True __UpperCAmelCase : Dict = BertGenerationEncoder(config=lowercase__) model.to(lowercase__) model.eval() __UpperCAmelCase : List[Any] = model( lowercase__ , attention_mask=lowercase__ , encoder_hidden_states=lowercase__ , encoder_attention_mask=lowercase__ , ) 
__UpperCAmelCase : Tuple = model( lowercase__ , attention_mask=lowercase__ , encoder_hidden_states=lowercase__ , ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , **lowercase__ , ): __UpperCAmelCase : Any = True __UpperCAmelCase : Dict = True __UpperCAmelCase : Optional[Any] = BertGenerationDecoder(config=lowercase__).to(lowercase__).eval() # first forward pass __UpperCAmelCase : Optional[int] = model( lowercase__ , attention_mask=lowercase__ , encoder_hidden_states=lowercase__ , encoder_attention_mask=lowercase__ , use_cache=lowercase__ , ) __UpperCAmelCase : List[str] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __UpperCAmelCase : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size) __UpperCAmelCase : Any = ids_tensor((self.batch_size, 3) , vocab_size=2) # append to next input_ids and __UpperCAmelCase : str = torch.cat([input_ids, next_tokens] , dim=-1) __UpperCAmelCase : List[Any] = torch.cat([input_mask, next_mask] , dim=-1) __UpperCAmelCase : Optional[Any] = model( lowercase__ , attention_mask=lowercase__ , encoder_hidden_states=lowercase__ , encoder_attention_mask=lowercase__ , output_hidden_states=lowercase__ , )['''hidden_states'''][0] __UpperCAmelCase : Union[str, Any] = model( lowercase__ , attention_mask=lowercase__ , encoder_hidden_states=lowercase__ , encoder_attention_mask=lowercase__ , past_key_values=lowercase__ , output_hidden_states=lowercase__ , )['''hidden_states'''][0] # select random slice __UpperCAmelCase : str = ids_tensor((1,) , output_from_past.shape[-1]).item() __UpperCAmelCase : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach() __UpperCAmelCase : Any = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that 
outputs are equal for slice self.parent.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1e-3)) def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__ , ): __UpperCAmelCase : Optional[Any] = BertGenerationDecoder(lowercase__) model.to(lowercase__) model.eval() __UpperCAmelCase : List[str] = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def A( self): __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : str = self.prepare_config_and_inputs() __UpperCAmelCase : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ): _lowerCAmelCase : Any = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () _lowerCAmelCase : List[str] = (BertGenerationDecoder,) if is_torch_available() else () _lowerCAmelCase : Any = ( {'''feature-extraction''': BertGenerationEncoder, '''text-generation''': BertGenerationDecoder} if is_torch_available() else {} ) def A( self): __UpperCAmelCase : Union[str, Any] = BertGenerationEncoderTester(self) __UpperCAmelCase : Optional[int] = ConfigTester(self , config_class=lowercase__ , hidden_size=3_7) def A( self): self.config_tester.run_common_tests() def A( self): __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase__) def A( self): __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() __UpperCAmelCase : int = '''bert''' self.model_tester.create_and_check_model(lowercase__ , lowercase__ , lowercase__ , lowercase__) def A( self): __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder() 
self.model_tester.create_and_check_model_as_decoder(*lowercase__) def A( self): __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowercase__) def A( self): # This regression test was failing with PyTorch < 1.3 ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder() __UpperCAmelCase : int = None self.model_tester.create_and_check_model_as_decoder( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ) def A( self): __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*lowercase__) @slow def A( self): __UpperCAmelCase : str = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''') self.assertIsNotNone(lowercase__) @require_torch class lowerCamelCase ( unittest.TestCase ): @slow def A( self): __UpperCAmelCase : List[str] = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''') __UpperCAmelCase : Dict = torch.tensor([[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_2_6, 3_8_9_9, 2_0_0_3, 1_0_1_4_0, 1_0_2]]) with torch.no_grad(): __UpperCAmelCase : Any = model(lowercase__)[0] __UpperCAmelCase : str = torch.Size([1, 8, 1_0_2_4]) self.assertEqual(output.shape , lowercase__) __UpperCAmelCase : List[Any] = torch.tensor( [[[0.1_7_7_5, 0.0_0_8_3, -0.0_3_2_1], [1.6_0_0_2, 0.1_2_8_7, 0.3_9_1_2], [2.1_4_7_3, 0.5_7_9_1, 0.6_0_6_6]]]) self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase__ , atol=1e-4)) @require_torch class lowerCamelCase ( unittest.TestCase ): @slow def A( self): __UpperCAmelCase : Union[str, Any] = BertGenerationDecoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''') __UpperCAmelCase : Any = 
torch.tensor([[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_2_6, 3_8_9_9, 2_0_0_3, 1_0_1_4_0, 1_0_2]]) with torch.no_grad(): __UpperCAmelCase : int = model(lowercase__)[0] __UpperCAmelCase : Optional[Any] = torch.Size([1, 8, 5_0_3_5_8]) self.assertEqual(output.shape , lowercase__) __UpperCAmelCase : Tuple = torch.tensor( [[[-0.5_7_8_8, -2.5_9_9_4, -3.7_0_5_4], [0.0_4_3_8, 4.7_9_9_7, 1.8_7_9_5], [1.5_8_6_2, 6.6_4_0_9, 4.4_6_3_8]]]) self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase__ , atol=1e-4))
462
import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCamelCase : def __init__( self , lowercase__ , lowercase__=1_3 , lowercase__=7 , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=9_9 , lowercase__=2_4 , lowercase__=2 , lowercase__=6 , lowercase__=3_7 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=1_6 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=3 , lowercase__=None , lowercase__=1_0_0_0 , ): __UpperCAmelCase : int = parent __UpperCAmelCase : Optional[int] = batch_size __UpperCAmelCase : Any = seq_length __UpperCAmelCase : Dict = is_training __UpperCAmelCase : str = use_input_mask __UpperCAmelCase : Dict = use_token_type_ids __UpperCAmelCase : Tuple = use_labels __UpperCAmelCase : Union[str, Any] = vocab_size __UpperCAmelCase : Tuple = hidden_size __UpperCAmelCase : Dict = num_hidden_layers __UpperCAmelCase : Optional[Any] = num_attention_heads __UpperCAmelCase : Union[str, Any] = intermediate_size __UpperCAmelCase : Optional[int] = hidden_act __UpperCAmelCase : int = hidden_dropout_prob __UpperCAmelCase : List[Any] = attention_probs_dropout_prob __UpperCAmelCase : List[Any] = max_position_embeddings __UpperCAmelCase : Optional[Any] = type_vocab_size __UpperCAmelCase : Optional[Any] = type_sequence_label_size __UpperCAmelCase : str = initializer_range __UpperCAmelCase : str = num_labels 
__UpperCAmelCase : str = scope __UpperCAmelCase : Union[str, Any] = range_bbox def A( self): __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) __UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox) # Ensure that bbox is legal for i in range(bbox.shape[0]): for j in range(bbox.shape[1]): if bbox[i, j, 3] < bbox[i, j, 1]: __UpperCAmelCase : Optional[int] = bbox[i, j, 3] __UpperCAmelCase : str = bbox[i, j, 1] __UpperCAmelCase : Union[str, Any] = t if bbox[i, j, 2] < bbox[i, j, 0]: __UpperCAmelCase : Optional[int] = bbox[i, j, 2] __UpperCAmelCase : int = bbox[i, j, 0] __UpperCAmelCase : Dict = t __UpperCAmelCase : Tuple = None if self.use_input_mask: __UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) __UpperCAmelCase : List[Any] = None if self.use_token_type_ids: __UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) __UpperCAmelCase : List[str] = None __UpperCAmelCase : Union[str, Any] = None if self.use_labels: __UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size) __UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) __UpperCAmelCase : int = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def A( self): return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , 
lowercase__ , lowercase__ , lowercase__ , ): __UpperCAmelCase : List[Any] = LiltModel(config=lowercase__) model.to(lowercase__) model.eval() __UpperCAmelCase : List[Any] = model(lowercase__ , bbox=lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__) __UpperCAmelCase : Optional[int] = model(lowercase__ , bbox=lowercase__ , token_type_ids=lowercase__) __UpperCAmelCase : Optional[int] = model(lowercase__ , bbox=lowercase__) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size)) def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ): __UpperCAmelCase : Optional[int] = self.num_labels __UpperCAmelCase : Dict = LiltForTokenClassification(config=lowercase__) model.to(lowercase__) model.eval() __UpperCAmelCase : Dict = model( lowercase__ , bbox=lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ): __UpperCAmelCase : Dict = LiltForQuestionAnswering(config=lowercase__) model.to(lowercase__) model.eval() __UpperCAmelCase : List[Any] = model( lowercase__ , bbox=lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , start_positions=lowercase__ , end_positions=lowercase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def A( self): __UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) : Any 
= config_and_inputs __UpperCAmelCase : int = { '''input_ids''': input_ids, '''bbox''': bbox, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ): _lowerCAmelCase : str = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) _lowerCAmelCase : str = ( { '''feature-extraction''': LiltModel, '''question-answering''': LiltForQuestionAnswering, '''text-classification''': LiltForSequenceClassification, '''token-classification''': LiltForTokenClassification, '''zero-shot''': LiltForSequenceClassification, } if is_torch_available() else {} ) _lowerCAmelCase : Any = False _lowerCAmelCase : List[Any] = False def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__): return True def A( self): __UpperCAmelCase : Tuple = LiltModelTester(self) __UpperCAmelCase : List[Any] = ConfigTester(self , config_class=lowercase__ , hidden_size=3_7) def A( self): self.config_tester.run_common_tests() def A( self): __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase__) def A( self): __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __UpperCAmelCase : Any = type self.model_tester.create_and_check_model(*lowercase__) def A( self): __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowercase__) def A( self): __UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowercase__) @slow def A( self): for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : Optional[Any] = 
LiltModel.from_pretrained(lowercase__) self.assertIsNotNone(lowercase__) @require_torch @slow class lowerCamelCase ( unittest.TestCase ): def A( self): __UpperCAmelCase : Any = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''').to(lowercase__) __UpperCAmelCase : str = torch.tensor([[1, 2]] , device=lowercase__) __UpperCAmelCase : List[str] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=lowercase__) # forward pass with torch.no_grad(): __UpperCAmelCase : Dict = model(input_ids=lowercase__ , bbox=lowercase__) __UpperCAmelCase : List[Any] = torch.Size([1, 2, 7_6_8]) __UpperCAmelCase : Union[str, Any] = torch.tensor( [[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=lowercase__ , ) self.assertTrue(outputs.last_hidden_state.shape , lowercase__) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , lowercase__ , atol=1e-3))
462
1
"""simple docstring""" import math def __A ( a_ : list , a_ : int )-> int: '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = len(a_ ) SCREAMING_SNAKE_CASE : Optional[Any] = int(math.floor(math.sqrt(a_ ) ) ) SCREAMING_SNAKE_CASE : List[str] = 0 while arr[min(a_ , a_ ) - 1] < x: SCREAMING_SNAKE_CASE : Optional[Any] = step step += int(math.floor(math.sqrt(a_ ) ) ) if prev >= n: return -1 while arr[prev] < x: SCREAMING_SNAKE_CASE : Any = prev + 1 if prev == min(a_ , a_ ): return -1 if arr[prev] == x: return prev return -1 if __name__ == "__main__": lowerCamelCase__ : Union[str, Any] = input("Enter numbers separated by a comma:\n").strip() lowerCamelCase__ : List[str] = [int(item) for item in user_input.split(",")] lowerCamelCase__ : Dict = int(input("Enter the number to be searched:\n")) lowerCamelCase__ : Tuple = jump_search(arr, x) if res == -1: print("Number not found!") else: print(f'''Number {x} is at index {res}''')
18
"""simple docstring""" def __A ( a_ : int )-> list[int]: '''simple docstring''' if num <= 0: raise ValueError('''Input must be a positive integer''' ) SCREAMING_SNAKE_CASE : Optional[int] = [True] * (num + 1) SCREAMING_SNAKE_CASE : Optional[Any] = 2 while p * p <= num: if primes[p]: for i in range(p * p , num + 1 , a_ ): SCREAMING_SNAKE_CASE : Any = False p += 1 return [prime for prime in range(2 , num + 1 ) if primes[prime]] if __name__ == "__main__": import doctest doctest.testmod() lowerCamelCase__ : str = int(input("Enter a positive integer: ").strip()) print(prime_sieve_eratosthenes(user_num))
18
1
from __future__ import annotations from typing import Generic, TypeVar __UpperCAmelCase = TypeVar('''T''') class lowerCAmelCase_ ( Generic[T] ): def __init__( self, SCREAMING_SNAKE_CASE_ ) -> None: UpperCamelCase : Tuple = data UpperCamelCase : List[Any] = self UpperCamelCase : int = 0 class lowerCAmelCase_ ( Generic[T] ): def __init__( self ) -> None: # map from node name to the node object UpperCamelCase : dict[T, DisjointSetTreeNode[T]] = {} def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> None: # create a new set with x as its member UpperCamelCase : List[Any] = DisjointSetTreeNode(SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> DisjointSetTreeNode[T]: # find the set x belongs to (with path-compression) UpperCamelCase : Optional[int] = self.map[data] if elem_ref != elem_ref.parent: UpperCamelCase : Tuple = self.find_set(elem_ref.parent.data ) return elem_ref.parent def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> None: # helper function for union operation if nodea.rank > nodea.rank: UpperCamelCase : Dict = nodea else: UpperCamelCase : Dict = nodea if nodea.rank == nodea.rank: nodea.rank += 1 def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> None: # merge 2 disjoint sets self.link(self.find_set(SCREAMING_SNAKE_CASE_ ), self.find_set(SCREAMING_SNAKE_CASE_ ) ) class lowerCAmelCase_ ( Generic[T] ): def __init__( self ) -> None: # connections: map from the node to the neighbouring nodes (with weights) UpperCamelCase : dict[T, dict[T, int]] = {} def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> None: # add a node ONLY if its not present in the graph if node not in self.connections: UpperCamelCase : Any = {} def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> None: # add an edge with the given weight self.add_node(SCREAMING_SNAKE_CASE_ ) self.add_node(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : str = weight UpperCamelCase : List[str] = weight def 
snake_case_ ( self ) -> GraphUndirectedWeighted[T]: UpperCamelCase : List[str] = [] UpperCamelCase : Optional[Any] = set() for start in self.connections: for end in self.connections[start]: if (start, end) not in seen: seen.add((end, start) ) edges.append((start, end, self.connections[start][end]) ) edges.sort(key=lambda SCREAMING_SNAKE_CASE_ : x[2] ) # creating the disjoint set UpperCamelCase : int = DisjointSetTree[T]() for node in self.connections: disjoint_set.make_set(SCREAMING_SNAKE_CASE_ ) # MST generation UpperCamelCase : Dict = 0 UpperCamelCase : Tuple = 0 UpperCamelCase : List[Any] = GraphUndirectedWeighted[T]() while num_edges < len(self.connections ) - 1: UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[Any] = edges[index] index += 1 UpperCamelCase : Dict = disjoint_set.find_set(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Union[str, Any] = disjoint_set.find_set(SCREAMING_SNAKE_CASE_ ) if parent_u != parent_v: num_edges += 1 graph.add_edge(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) disjoint_set.union(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) return graph
40
def kth_permutation(k: int, n: int) -> list:
    """Return the k-th (0-indexed) lexicographic permutation of range(n).

    Uses the factorial number system: each "digit" of k selects one of the
    remaining elements.

    Args:
        k: rank of the permutation, 0 <= k < n!.
        n: number of elements to permute; must be positive.

    Returns:
        The k-th permutation of [0, 1, ..., n - 1] as a list.

    Raises:
        ValueError: if ``n`` is not positive or ``k`` is out of range.
        (Was a bare ``assert`` before, which vanishes under ``python -O``.)
    """
    if n <= 0:
        raise ValueError("n must be a positive integer")
    # Factorials 1!, 2!, ..., (n-1)! — one per position except the last.
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    if not 0 <= k < factorials[-1] * n:
        raise ValueError("k out of bounds")

    permutation = []
    elements = list(range(n))

    # Find permutation: peel off the largest factorial digit each step.
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    if elements:
        # One element remains for n >= 2; n == 1 consumed it inside the loop
        # (the original unconditionally indexed elements[0] and crashed there).
        permutation.append(elements[0])
    return permutation


if __name__ == "__main__":
    import doctest

    doctest.testmod()
40
1
"""simple docstring""" import math import time from typing import Dict, List, Optional from torch.utils.data import Dataset from transformers import SeqaSeqTrainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def __init__(self , *lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , **lowerCAmelCase__ ): '''simple docstring''' super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ ) _UpperCamelCase : int = eval_examples _UpperCamelCase : str = post_process_function def lowercase_ (self , lowerCAmelCase__ = None , lowerCAmelCase__=None , lowerCAmelCase__ = None , lowerCAmelCase__ = "eval" , **lowerCAmelCase__ , ): '''simple docstring''' _UpperCamelCase : Optional[int] = gen_kwargs.copy() _UpperCamelCase : int = ( gen_kwargs["max_length"] if gen_kwargs.get("max_length" ) is not None else self.args.generation_max_length ) _UpperCamelCase : List[str] = ( gen_kwargs["num_beams"] if gen_kwargs.get("num_beams" ) is not None else self.args.generation_num_beams ) _UpperCamelCase : Optional[int] = gen_kwargs _UpperCamelCase : Tuple = self.eval_dataset if eval_dataset is None else eval_dataset _UpperCamelCase : Dict = self.get_eval_dataloader(lowerCAmelCase__ ) _UpperCamelCase : Tuple = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. 
_UpperCamelCase : Dict = self.compute_metrics _UpperCamelCase : Optional[Any] = None _UpperCamelCase : Union[str, Any] = time.time() _UpperCamelCase : Dict = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: _UpperCamelCase : Optional[int] = eval_loop( lowerCAmelCase__ , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowerCAmelCase__ , metric_key_prefix=lowerCAmelCase__ , ) finally: _UpperCamelCase : Dict = compute_metrics _UpperCamelCase : Optional[int] = self.args.eval_batch_size * self.args.world_size if F"{metric_key_prefix}_jit_compilation_time" in output.metrics: start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"] output.metrics.update( speed_metrics( lowerCAmelCase__ , lowerCAmelCase__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default _UpperCamelCase : Union[str, Any] = self.post_process_function(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCamelCase : Union[str, Any] = self.compute_metrics(lowerCAmelCase__ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"{metric_key_prefix}_" ): _UpperCamelCase : List[str] = metrics.pop(lowerCAmelCase__ ) metrics.update(output.metrics ) else: _UpperCamelCase : str = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(lowerCAmelCase__ ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) 
xm.master_print(met.metrics_report() ) _UpperCamelCase : Any = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowerCAmelCase__ ) return metrics def lowercase_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__ = "test" , **lowerCAmelCase__ ): '''simple docstring''' _UpperCamelCase : str = gen_kwargs.copy() _UpperCamelCase : Any = self.get_test_dataloader(lowerCAmelCase__ ) # Temporarily disable metric computation, we will do it in the loop here. _UpperCamelCase : Any = self.compute_metrics _UpperCamelCase : Union[str, Any] = None _UpperCamelCase : Optional[int] = time.time() _UpperCamelCase : Tuple = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: _UpperCamelCase : List[Any] = eval_loop( lowerCAmelCase__ , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowerCAmelCase__ , metric_key_prefix=lowerCAmelCase__ , ) finally: _UpperCamelCase : Optional[Any] = compute_metrics _UpperCamelCase : List[str] = self.args.eval_batch_size * self.args.world_size if F"{metric_key_prefix}_jit_compilation_time" in output.metrics: start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"] output.metrics.update( speed_metrics( lowerCAmelCase__ , lowerCAmelCase__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output _UpperCamelCase : List[Any] = self.post_process_function(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , "predict" ) _UpperCamelCase : Any = self.compute_metrics(lowerCAmelCase__ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"{metric_key_prefix}_" ): _UpperCamelCase : Optional[Any] = metrics.pop(lowerCAmelCase__ ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions 
, label_ids=predictions.label_ids , metrics=lowerCAmelCase__ )
239
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) _SCREAMING_SNAKE_CASE = { """configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ """FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""", """FalconForCausalLM""", """FalconModel""", """FalconPreTrainedModel""", """FalconForSequenceClassification""", """FalconForTokenClassification""", """FalconForQuestionAnswering""", ] if TYPE_CHECKING: from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_falcon import ( FALCON_PRETRAINED_MODEL_ARCHIVE_LIST, FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, FalconPreTrainedModel, ) else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
239
1
"""simple docstring""" import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class __magic_name__ ( A__ ): UpperCamelCase : int = (EulerDiscreteScheduler,) UpperCamelCase : int = 10 def _lowerCamelCase ( self , **__magic_name__ ): """simple docstring""" _lowerCAmelCase = { 'num_train_timesteps': 1_1_0_0, 'beta_start': 0.00_01, 'beta_end': 0.02, 'beta_schedule': 'linear', } config.update(**__magic_name__ ) return config def _lowerCamelCase ( self ): """simple docstring""" for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=__magic_name__ ) def _lowerCamelCase ( self ): """simple docstring""" for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ): self.check_over_configs(beta_start=__magic_name__ , beta_end=__magic_name__ ) def _lowerCamelCase ( self ): """simple docstring""" for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=__magic_name__ ) def _lowerCamelCase ( self ): """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__magic_name__ ) def _lowerCamelCase ( self ): """simple docstring""" _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config() _lowerCAmelCase = scheduler_class(**__magic_name__ ) scheduler.set_timesteps(self.num_inference_steps ) _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = self.dummy_model() _lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma _lowerCAmelCase = sample.to(__magic_name__ ) for i, t in enumerate(scheduler.timesteps ): _lowerCAmelCase = scheduler.scale_model_input(__magic_name__ , __magic_name__ ) _lowerCAmelCase = model(__magic_name__ , __magic_name__ ) _lowerCAmelCase = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , generator=__magic_name__ ) _lowerCAmelCase = output.prev_sample _lowerCAmelCase 
= torch.sum(torch.abs(__magic_name__ ) ) _lowerCAmelCase = torch.mean(torch.abs(__magic_name__ ) ) assert abs(result_sum.item() - 10.08_07 ) < 1e-2 assert abs(result_mean.item() - 0.01_31 ) < 1e-3 def _lowerCamelCase ( self ): """simple docstring""" _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config(prediction_type='v_prediction' ) _lowerCAmelCase = scheduler_class(**__magic_name__ ) scheduler.set_timesteps(self.num_inference_steps ) _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = self.dummy_model() _lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma _lowerCAmelCase = sample.to(__magic_name__ ) for i, t in enumerate(scheduler.timesteps ): _lowerCAmelCase = scheduler.scale_model_input(__magic_name__ , __magic_name__ ) _lowerCAmelCase = model(__magic_name__ , __magic_name__ ) _lowerCAmelCase = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , generator=__magic_name__ ) _lowerCAmelCase = output.prev_sample _lowerCAmelCase = torch.sum(torch.abs(__magic_name__ ) ) _lowerCAmelCase = torch.mean(torch.abs(__magic_name__ ) ) assert abs(result_sum.item() - 0.00_02 ) < 1e-2 assert abs(result_mean.item() - 2.2_676e-06 ) < 1e-3 def _lowerCamelCase ( self ): """simple docstring""" _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config() _lowerCAmelCase = scheduler_class(**__magic_name__ ) scheduler.set_timesteps(self.num_inference_steps , device=__magic_name__ ) _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = self.dummy_model() _lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() _lowerCAmelCase = sample.to(__magic_name__ ) for t in scheduler.timesteps: _lowerCAmelCase = scheduler.scale_model_input(__magic_name__ , __magic_name__ ) _lowerCAmelCase = model(__magic_name__ , __magic_name__ ) _lowerCAmelCase = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , generator=__magic_name__ ) _lowerCAmelCase = 
output.prev_sample _lowerCAmelCase = torch.sum(torch.abs(__magic_name__ ) ) _lowerCAmelCase = torch.mean(torch.abs(__magic_name__ ) ) assert abs(result_sum.item() - 10.08_07 ) < 1e-2 assert abs(result_mean.item() - 0.01_31 ) < 1e-3 def _lowerCamelCase ( self ): """simple docstring""" _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config() _lowerCAmelCase = scheduler_class(**__magic_name__ , use_karras_sigmas=__magic_name__ ) scheduler.set_timesteps(self.num_inference_steps , device=__magic_name__ ) _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = self.dummy_model() _lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() _lowerCAmelCase = sample.to(__magic_name__ ) for t in scheduler.timesteps: _lowerCAmelCase = scheduler.scale_model_input(__magic_name__ , __magic_name__ ) _lowerCAmelCase = model(__magic_name__ , __magic_name__ ) _lowerCAmelCase = scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , generator=__magic_name__ ) _lowerCAmelCase = output.prev_sample _lowerCAmelCase = torch.sum(torch.abs(__magic_name__ ) ) _lowerCAmelCase = torch.mean(torch.abs(__magic_name__ ) ) assert abs(result_sum.item() - 1_24.52_29_94_99_51_17_19 ) < 1e-2 assert abs(result_mean.item() - 0.1_62_13_93_26_33_39_99_63 ) < 1e-3
589
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING

logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    """Configuration for a UperNet semantic-segmentation model.

    Wraps a backbone config (defaulting to ResNet) plus the decode/auxiliary
    head hyper-parameters. The obfuscated source collapsed every ``self.X =``
    target into one throwaway name, so no attribute was actually stored —
    restored here.
    """

    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=None,  # was a shared mutable list default; None -> [1, 2, 3, 6]
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            # Re-hydrate a plain dict into the matching config class.
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = [1, 2, 3, 6] if pool_scales is None else pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
183
0
from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) lowerCAmelCase_ = _symbol_database.Default() lowerCAmelCase_ = _descriptor_pool.Default().AddSerializedFile( B'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 
\x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 
\x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03' ) lowerCAmelCase_ = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals) if _descriptor._USE_C_DESCRIPTORS is False: lowerCAmelCase_ = None lowerCAmelCase_ = B'H\003' # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" lowerCAmelCase_ = 45 lowerCAmelCase_ = 15_81 lowerCAmelCase_ = 15_17 lowerCAmelCase_ = 15_70 lowerCAmelCase_ = 15_84 lowerCAmelCase_ = 17_93 lowerCAmelCase_ = 17_95 lowerCAmelCase_ = 19_16 lowerCAmelCase_ = 18_64 lowerCAmelCase_ = 19_05 lowerCAmelCase_ = 19_19 lowerCAmelCase_ = 24_29 lowerCAmelCase_ = 22_08 lowerCAmelCase_ = 24_18 lowerCAmelCase_ = 23_23 lowerCAmelCase_ = 24_07 # @@protoc_insertion_point(module_scope)
596
import contextlib
from multiprocessing import Pool, RLock

from tqdm.auto import tqdm

from ..utils import experimental, logging


logger = logging.get_logger(__name__)


class ParallelBackendConfig:
    """Holds the joblib backend name selected via `parallel_backend` (None = multiprocessing)."""

    backend_name = None


@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Apply `single_map_nested_func` over `iterable`, either with a multiprocessing
    Pool (default) or with the joblib backend registered via `parallel_backend`."""
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )
    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)


def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Split `iterable` into `num_proc` contiguous shards and map them in a Pool."""
    # Never spawn more workers than there are items.
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselves (contiguous splits)
    # div/mod are loop-invariant: each shard gets `div` items, the first `mod`
    # shards get one extra.
    div = len(iterable) // num_proc
    mod = len(iterable) % num_proc
    for index in range(num_proc):
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    # Sanity check: the shards must partition the input exactly.
    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        # Share tqdm's write lock with the workers so progress bars don't interleave.
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    # Flatten the per-process result lists back into one list.
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")
    return mapped


def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Map via joblib using the backend registered in ParallelBackendConfig.

    progress bar is not yet supported for _map_with_joblib, because tqdm couldn't
    accurately be applied to joblib, hence the fixed (None, True, None) args.
    """
    # joblib is an optional dependency, so import lazily.
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )


@experimental
@contextlib.contextmanager
def parallel_backend(backend_name):
    """Context manager selecting a joblib backend for `parallel_map` (e.g. "spark")."""
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()
        # TODO: call create_cache_and_write_probe if "download" in steps
        # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        # Always restore the default (multiprocessing) backend.
        ParallelBackendConfig.backend_name = None
596
1
def UpperCamelCase(a: int, b: int) -> str:
    """Return the bitwise XOR of two non-negative integers as a '0b'-prefixed
    binary string, zero-padded to the longer operand's width.

    Raises:
        ValueError: if either input is negative.

    Note: the original (obfuscated) source declared both parameters with the
    same name — a SyntaxError; the body's references to `a` and `b` ground
    the restored parameter names.
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
204
def UpperCamelCase(numerator: int = 1, digit: int = 1000) -> int:
    """Return the denominator in [numerator, digit] whose decimal expansion of
    numerator/denominator has the longest recurring cycle (Project Euler 26 style).

    The cycle length is detected by iterating the long-division remainders
    (`now_divide = now_divide * 10 % divide_by_number`) until one repeats.

    Note: the original (obfuscated) source declared both parameters with the
    same name — a SyntaxError; the body's references to `numerator` and
    `digit` ground the restored parameter names.
    """
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        if divide_by_number == 0:
            # Guard: a zero divisor would raise ZeroDivisionError below.
            continue
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                # Remainder repeats: the cycle length is the number of
                # distinct remainders seen so far.
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
            now_divide = now_divide * 10 % divide_by_number

    return the_digit


# Tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
204
1
from __future__ import annotations


def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Compare array[index1] and array[index2]; swap them if they violate
    `direction` (1 = ascending, 0 = descending). In-place, returns None.

    Note: the original (obfuscated) line assigned a tuple to a local instead
    of performing the swap — the tuple-assignment swap is restored here.
    """
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Merge a bitonic sequence of `length` elements starting at `low` into
    monotonic order given by `direction`. In-place, returns None."""
    if length > 1:
        middle = int(length / 2)
        # Pairwise compare the two halves, then recursively merge each half.
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Sort `length` elements of `array` starting at `low` in-place.

    `length` must be a power of two (standard bitonic-sort requirement —
    the recursion halves it exactly). direction: 1 = ascending, 0 = descending.
    """
    if length > 1:
        middle = int(length / 2)
        # Build a bitonic sequence: first half ascending, second half descending.
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print("\nSorted array in ascending order is: ", end="")
    print(*unsorted, sep=", ")

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print("Sorted array in descending order is: ", end="")
    print(*unsorted, sep=", ")
716
"""Lazy import structure for the PLBart model family: only the config symbols
are always exported; tokenizer and modeling symbols are added when their
optional dependencies (sentencepiece / torch) are available."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Tokenizer requires sentencepiece. The obfuscated source assigned this
    # list to a throwaway name; it must extend _import_structure to take effect.
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_plbart import PLBartTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_plbart import (
            PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            PLBartForCausalLM,
            PLBartForConditionalGeneration,
            PLBartForSequenceClassification,
            PLBartModel,
            PLBartPreTrainedModel,
        )

else:
    import sys

    # Replace this module with the lazy proxy; the obfuscated source assigned
    # the proxy to a throwaway variable, which disables lazy loading entirely.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
88
0
"""simple docstring""" from binascii import hexlify from hashlib import shaaaa from os import urandom # RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for # Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526 lowercase__ : Union[str, Any] = { # 1536-bit 5: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, # 2048-bit 14: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B''' + '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9''' + '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510''' + '''15728E5A8AACAA68FFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, # 3072-bit 15: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B''' + 
'''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9''' + '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510''' + '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64''' + '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7''' + '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B''' + '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C''' + '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31''' + '''43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, # 4096-bit 16: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B''' + '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9''' + '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510''' + '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64''' + '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7''' + '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B''' + '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C''' + '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31''' + '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7''' + '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA''' + '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6''' + '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED''' + '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9''' + '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199''' + '''FFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, # 6144-bit 17: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08''' + '''8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B''' + 
'''302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9''' + '''A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6''' + '''49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8''' + '''FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C''' + '''180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718''' + '''3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D''' + '''04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D''' + '''B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226''' + '''1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C''' + '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC''' + '''E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26''' + '''99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB''' + '''04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2''' + '''233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127''' + '''D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492''' + '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406''' + '''AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918''' + '''DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151''' + '''2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03''' + '''F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F''' + '''BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA''' + '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B''' + '''B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632''' + '''387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E''' + '''6DCC4024FFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, # 8192-bit 18: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + 
'''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B''' + '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9''' + '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510''' + '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64''' + '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7''' + '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B''' + '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C''' + '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31''' + '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7''' + '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA''' + '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6''' + '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED''' + '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9''' + '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492''' + '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD''' + '''F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831''' + '''179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B''' + '''DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF''' + '''5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6''' + '''D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3''' + '''23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA''' + '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328''' + '''06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C''' + '''DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE''' + '''12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4''' + '''38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300''' + '''741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568''' + '''3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9''' + '''22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B''' + '''4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A''' + '''062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36''' + '''4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1''' + 
'''B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92''' + '''4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47''' + '''9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71''' + '''60C980DD98EDD3DFFFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, } class _UpperCAmelCase : def __init__( self : Any , lowercase_ : int = 14 ): if group not in primes: raise ValueError('''Unsupported Group''' ) snake_case_ : Union[str, Any] = primes[group]['''prime'''] snake_case_ : Union[str, Any] = primes[group]['''generator'''] snake_case_ : Union[str, Any] = int(hexlify(urandom(32 ) ) , base=16 ) def _snake_case ( self : int ): return hex(self.__private_key )[2:] def _snake_case ( self : int ): snake_case_ : List[str] = pow(self.generator , self.__private_key , self.prime ) return hex(lowercase_ )[2:] def _snake_case ( self : Optional[Any] , lowercase_ : int ): # check if the other public key is valid based on NIST SP800-56 return ( 2 <= key <= self.prime - 2 and pow(lowercase_ , (self.prime - 1) // 2 , self.prime ) == 1 ) def _snake_case ( self : Any , lowercase_ : str ): snake_case_ : Any = int(lowercase_ , base=16 ) if not self.is_valid_public_key(lowercase_ ): raise ValueError('''Invalid public key''' ) snake_case_ : Tuple = pow(lowercase_ , self.__private_key , self.prime ) return shaaaa(str(lowercase_ ).encode() ).hexdigest() @staticmethod def _snake_case ( lowercase_ : int , lowercase_ : int ): # check if the other public key is valid based on NIST SP800-56 return ( 2 <= remote_public_key_str <= prime - 2 and pow(lowercase_ , (prime - 1) // 2 , lowercase_ ) == 1 ) @staticmethod def _snake_case ( lowercase_ : str , lowercase_ : str , lowercase_ : int = 14 ): snake_case_ : Dict = int(lowercase_ , base=16 ) snake_case_ : Tuple = int(lowercase_ , base=16 ) snake_case_ : Dict = primes[group]['''prime'''] if not DiffieHellman.is_valid_public_key_static(lowercase_ , lowercase_ ): raise ValueError('''Invalid public key''' ) snake_case_ : Optional[Any] = pow(lowercase_ , lowercase_ , 
lowercase_ ) return shaaaa(str(lowercase_ ).encode() ).hexdigest() if __name__ == "__main__": import doctest doctest.testmod()
123
"""simple docstring""" from math import sqrt def __lowercase ( _a ): assert isinstance(_a , _a ) and ( number >= 0 ), "'number' must been an int and positive" snake_case_ : List[str] = True # 0 and 1 are none primes. if number <= 1: snake_case_ : Optional[int] = False for divisor in range(2 , int(round(sqrt(_a ) ) ) + 1 ): # if 'number' divisible by 'divisor' then sets 'status' # of false and break up the loop. if number % divisor == 0: snake_case_ : List[Any] = False break # precondition assert isinstance(_a , _a ), "'status' must been from type bool" return status def __lowercase ( _a ): assert isinstance(_a , _a ) and (n > 2), "'N' must been an int and > 2" # beginList: contains all natural numbers from 2 up to N snake_case_ : int = list(range(2 , n + 1 ) ) snake_case_ : Optional[int] = [] # this list will be returns. # actual sieve of erathostenes for i in range(len(_a ) ): for j in range(i + 1 , len(_a ) ): if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0): snake_case_ : List[Any] = 0 # filters actual prime numbers. snake_case_ : str = [x for x in begin_list if x != 0] # precondition assert isinstance(_a , _a ), "'ans' must been from type list" return ans def __lowercase ( _a ): assert isinstance(_a , _a ) and (n > 2), "'N' must been an int and > 2" snake_case_ : List[str] = [] # iterates over all numbers between 2 up to N+1 # if a number is prime then appends to list 'ans' for number in range(2 , n + 1 ): if is_prime(_a ): ans.append(_a ) # precondition assert isinstance(_a , _a ), "'ans' must been from type list" return ans def __lowercase ( _a ): assert isinstance(_a , _a ) and number >= 0, "'number' must been an int and >= 0" snake_case_ : Optional[int] = [] # this list will be returns of the function. # potential prime number factors. 
snake_case_ : Optional[Any] = 2 snake_case_ : List[str] = number if number == 0 or number == 1: ans.append(_a ) # if 'number' not prime then builds the prime factorization of 'number' elif not is_prime(_a ): while quotient != 1: if is_prime(_a ) and (quotient % factor == 0): ans.append(_a ) quotient /= factor else: factor += 1 else: ans.append(_a ) # precondition assert isinstance(_a , _a ), "'ans' must been from type list" return ans def __lowercase ( _a ): assert isinstance(_a , _a ) and ( number >= 0 ), "'number' bust been an int and >= 0" snake_case_ : str = 0 # prime factorization of 'number' snake_case_ : Union[str, Any] = prime_factorization(_a ) snake_case_ : int = max(_a ) # precondition assert isinstance(_a , _a ), "'ans' must been from type int" return ans def __lowercase ( _a ): assert isinstance(_a , _a ) and ( number >= 0 ), "'number' bust been an int and >= 0" snake_case_ : List[Any] = 0 # prime factorization of 'number' snake_case_ : Union[str, Any] = prime_factorization(_a ) snake_case_ : int = min(_a ) # precondition assert isinstance(_a , _a ), "'ans' must been from type int" return ans def __lowercase ( _a ): assert isinstance(_a , _a ), "'number' must been an int" assert isinstance(number % 2 == 0 , _a ), "compare bust been from type bool" return number % 2 == 0 def __lowercase ( _a ): assert isinstance(_a , _a ), "'number' must been an int" assert isinstance(number % 2 != 0 , _a ), "compare bust been from type bool" return number % 2 != 0 def __lowercase ( _a ): assert ( isinstance(_a , _a ) and (number > 2) and is_even(_a ) ), "'number' must been an int, even and > 2" snake_case_ : Optional[Any] = [] # this list will returned # creates a list of prime numbers between 2 up to 'number' snake_case_ : Dict = get_prime_numbers(_a ) snake_case_ : Optional[int] = len(_a ) # run variable for while-loops. snake_case_ : List[str] = 0 snake_case_ : Optional[Any] = None # exit variable. 
for break up the loops snake_case_ : List[Any] = True while i < len_pn and loop: snake_case_ : Optional[int] = i + 1 while j < len_pn and loop: if prime_numbers[i] + prime_numbers[j] == number: snake_case_ : List[Any] = False ans.append(prime_numbers[i] ) ans.append(prime_numbers[j] ) j += 1 i += 1 # precondition assert ( isinstance(_a , _a ) and (len(_a ) == 2) and (ans[0] + ans[1] == number) and is_prime(ans[0] ) and is_prime(ans[1] ) ), "'ans' must contains two primes. And sum of elements must been eq 'number'" return ans def __lowercase ( _a , _a ): assert ( isinstance(_a , _a ) and isinstance(_a , _a ) and (numbera >= 0) and (numbera >= 0) ), "'number1' and 'number2' must been positive integer." snake_case_ : int = 0 while numbera != 0: snake_case_ : Optional[Any] = numbera % numbera snake_case_ : Tuple = numbera snake_case_ : str = rest # precondition assert isinstance(_a , _a ) and ( numbera >= 0 ), "'number' must been from type int and positive" return numbera def __lowercase ( _a , _a ): assert ( isinstance(_a , _a ) and isinstance(_a , _a ) and (numbera >= 1) and (numbera >= 1) ), "'number1' and 'number2' must been positive integer." snake_case_ : str = 1 # actual answer that will be return. 
# for kgV (x,1) if numbera > 1 and numbera > 1: # builds the prime factorization of 'number1' and 'number2' snake_case_ : str = prime_factorization(_a ) snake_case_ : Optional[Any] = prime_factorization(_a ) elif numbera == 1 or numbera == 1: snake_case_ : Dict = [] snake_case_ : Any = [] snake_case_ : str = max(_a , _a ) snake_case_ : Union[str, Any] = 0 snake_case_ : Optional[Any] = 0 snake_case_ : Union[str, Any] = [] # captured numbers int both 'primeFac1' and 'primeFac2' # iterates through primeFac1 for n in prime_fac_a: if n not in done: if n in prime_fac_a: snake_case_ : int = prime_fac_a.count(_a ) snake_case_ : Optional[Any] = prime_fac_a.count(_a ) for _ in range(max(_a , _a ) ): ans *= n else: snake_case_ : Optional[Any] = prime_fac_a.count(_a ) for _ in range(_a ): ans *= n done.append(_a ) # iterates through primeFac2 for n in prime_fac_a: if n not in done: snake_case_ : Any = prime_fac_a.count(_a ) for _ in range(_a ): ans *= n done.append(_a ) # precondition assert isinstance(_a , _a ) and ( ans >= 0 ), "'ans' must been from type int and positive" return ans def __lowercase ( _a ): assert isinstance(_a , _a ) and (n >= 0), "'number' must been a positive int" snake_case_ : List[Any] = 0 snake_case_ : Any = 2 # this variable holds the answer while index < n: index += 1 ans += 1 # counts to the next number # if ans not prime then # runs to the next prime number. while not is_prime(_a ): ans += 1 # precondition assert isinstance(_a , _a ) and is_prime( _a ), "'ans' must been a prime number and from type int" return ans def __lowercase ( _a , _a ): assert ( is_prime(_a ) and is_prime(_a ) and (p_number_a < p_number_a) ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'" snake_case_ : List[str] = p_number_a + 1 # jump to the next number snake_case_ : Tuple = [] # this list will be returns. # if number is not prime then # fetch the next prime number. 
while not is_prime(_a ): number += 1 while number < p_number_a: ans.append(_a ) number += 1 # fetch the next prime number. while not is_prime(_a ): number += 1 # precondition assert ( isinstance(_a , _a ) and ans[0] != p_number_a and ans[len(_a ) - 1] != p_number_a ), "'ans' must been a list without the arguments" # 'ans' contains not 'pNumber1' and 'pNumber2' ! return ans def __lowercase ( _a ): assert isinstance(_a , _a ) and (n >= 1), "'n' must been int and >= 1" snake_case_ : Optional[Any] = [] # will be returned. for divisor in range(1 , n + 1 ): if n % divisor == 0: ans.append(_a ) # precondition assert ans[0] == 1 and ans[len(_a ) - 1] == n, "Error in function getDivisiors(...)" return ans def __lowercase ( _a ): assert isinstance(_a , _a ) and ( number > 1 ), "'number' must been an int and >= 1" snake_case_ : List[Any] = get_divisors(_a ) # precondition assert ( isinstance(_a , _a ) and (divisors[0] == 1) and (divisors[len(_a ) - 1] == number) ), "Error in help-function getDivisiors(...)" # summed all divisors up to 'number' (exclusive), hence [:-1] return sum(divisors[:-1] ) == number def __lowercase ( _a , _a ): assert ( isinstance(_a , _a ) and isinstance(_a , _a ) and (denominator != 0) ), "The arguments must been from type int and 'denominator' != 0" # build the greatest common divisor of numerator and denominator. snake_case_ : List[Any] = gcd(abs(_a ) , abs(_a ) ) # precondition assert ( isinstance(_a , _a ) and (numerator % gcd_of_fraction == 0) and (denominator % gcd_of_fraction == 0) ), "Error in function gcd(...,...)" return (numerator // gcd_of_fraction, denominator // gcd_of_fraction) def __lowercase ( _a ): assert isinstance(_a , _a ) and (n >= 0), "'n' must been a int and >= 0" snake_case_ : Dict = 1 # this will be return. 
for factor in range(1 , n + 1 ): ans *= factor return ans def __lowercase ( _a ): assert isinstance(_a , _a ) and (n >= 0), "'n' must been an int and >= 0" snake_case_ : List[Any] = 0 snake_case_ : int = 1 snake_case_ : Any = 1 # this will be return for _ in range(n - 1 ): snake_case_ : List[Any] = ans ans += fiba snake_case_ : Any = tmp return ans
123
1
"""Tests for the LDM text-to-image pipeline: a fast CPU smoke test plus
slow/nightly GPU regression tests against pinned output slices.

The original (obfuscated) source named all three classes `a` (each clobbering
the previous) and all methods `__snake_case`; canonical names are restored.
"""
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    load_numpy,
    nightly,
    require_torch_gpu,
    slow,
    torch_device,
)

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
        "negative_prompt",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # NOTE(review): the obfuscated source only shows a bare `False` class flag
    # here; `test_cpu_offload` matches the mixin era — confirm attribute name.
    test_cpu_offload = False

    def get_dummy_components(self):
        """Build a tiny deterministic set of pipeline components."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=(32, 64),
            in_channels=3,
            out_channels=3,
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vqvae": vae,
            "bert": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_text2img(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3


@slow
@require_torch_gpu
class LDMTextToImagePipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878])
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1e-3


@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3
203
# NOTE(review): machine-mangled logging utilities (apparently from the HF
# ``datasets`` package).  The obfuscation collapsed distinct identifiers:
#   * both module constants are bound to the single name ``_snake_case``
#     (each later assignment shadows the earlier one),
#   * every helper function is defined as ``lowerCAmelCase_`` — each ``def``
#     shadows the previous, so only the LAST definition survives import,
#   * several signatures reuse ONE parameter name for ``*args`` and
#     ``**kwargs`` (and for positional params) — duplicate argument names are
#     a SyntaxError, so this module cannot even be imported as written,
#   * many bodies reference names lost to the renaming (``log_levels``,
#     ``_default_log_level``, ``env_level_str``, ``library_root_logger``,
#     ``name``, ``set_verbosity``, ``args``, ``disable``, ``EmptyTqdm``,
#     ``_tqdm_cls``) and would raise NameError if executed,
#   * module-level annotations such as ``: Dict`` are evaluated at import
#     time and those names are not imported (another NameError).
# Code is left byte-identical; comments below flag individual breakages.
import logging
import os
from logging import (
    CRITICAL,  # NOQA
    DEBUG,  # NOQA
    ERROR,  # NOQA
    FATAL,  # NOQA
    INFO,  # NOQA
    NOTSET,  # NOQA
    WARN,  # NOQA
    WARNING,  # NOQA
)
from typing import Optional

from tqdm import auto as tqdm_lib

# Mapping of DATASETS_VERBOSITY env-var values to stdlib logging levels
# (presumably once named ``log_levels`` — later code still uses that name).
_snake_case : int = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

# Default library log level (presumably once ``_default_log_level``).
# NOTE(review): rebinds ``_snake_case`` and hides the dict above.
_snake_case : Dict = logging.WARNING


def lowerCAmelCase_():
    # Resolve the default logging level from the environment.
    # NOTE(review): ``__lowerCamelCase`` is undefined (originally ``None``);
    # the getenv result is bound to ``__snake_case`` but then tested as
    # ``env_level_str``; ``log_levels`` / ``_default_log_level`` no longer
    # exist under those names.
    __snake_case : Union[str, Any] = os.getenv("DATASETS_VERBOSITY", __lowerCamelCase)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                F'Unknown option DATASETS_VERBOSITY={env_level_str}, '
                F'has to be one of: { ", ".join(log_levels.keys() ) }'
            )
    return _default_log_level


def lowerCAmelCase_():
    # Library name: the top-level package of this module.
    return __name__.split(".")[0]


def lowerCAmelCase_():
    # Root logger of the library.  NOTE(review): ``_get_library_name`` is a
    # lost name — the helper above was renamed away from it.
    return logging.getLogger(_get_library_name())


def lowerCAmelCase_():
    # Apply our default configuration to the library root logger.
    # NOTE(review): result lands in the local ``__snake_case`` but the next
    # line reads ``library_root_logger``; the two ``_get_*`` helpers are
    # likewise lost names.
    __snake_case : str = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def lowerCAmelCase_():
    # Reset the library root logger back to NOTSET (same breakages as above).
    __snake_case : Dict = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def lowerCAmelCase_(__lowerCamelCase=None):
    # get_logger(name=None).  NOTE(review): tests ``name`` but the parameter
    # is ``__lowerCamelCase``; the default computed into ``__snake_case`` is
    # never used.
    if name is None:
        __snake_case : Tuple = _get_library_name()
    return logging.getLogger(__lowerCamelCase)


def lowerCAmelCase_():
    # get_verbosity: effective level of the library root logger.
    return _get_library_root_logger().getEffectiveLevel()


def lowerCAmelCase_(__lowerCamelCase):
    # set_verbosity(level): set the library root logger level.
    _get_library_root_logger().setLevel(__lowerCamelCase)


def lowerCAmelCase_():
    # Originally set_verbosity_info/warning/debug/error (next four defs):
    # the level constants were all replaced by the undefined name
    # ``__lowerCamelCase`` and ``set_verbosity`` no longer exists.
    return set_verbosity(__lowerCamelCase)


def lowerCAmelCase_():
    return set_verbosity(__lowerCamelCase)


def lowerCAmelCase_():
    return set_verbosity(__lowerCamelCase)


def lowerCAmelCase_():
    return set_verbosity(__lowerCamelCase)


def lowerCAmelCase_():
    # Originally disable_propagation — now only binds a throwaway local.
    __snake_case : List[str] = False


def lowerCAmelCase_():
    # Originally enable_propagation — now only binds a throwaway local.
    __snake_case : Tuple = True


# Configure the library root logger at the module level (singleton-like)
# NOTE(review): ``_configure_library_root_logger`` is a lost name.
_configure_library_root_logger()


class a:
    """No-op stand-in for ``tqdm`` used when progress bars are disabled."""

    def __init__(self: int, *lowerCamelCase: Optional[Any], **lowerCamelCase: List[str]) -> Optional[int]:  # pylint: disable=unused-argument
        # NOTE(review): duplicate parameter name (SyntaxError); ``args`` is
        # undefined, and the result should presumably land in
        # ``self._iterator`` (read by ``__iter__``), not a local.
        __snake_case : int = args[0] if args else None

    def __iter__(self: Dict) -> Optional[int]:
        return iter(self._iterator)

    def __getattr__(self: int, lowerCamelCase: Optional[Any]) -> List[Any]:
        # Any attribute access yields a function that does nothing.
        def empty_fn(*lowerCamelCase: Optional[int], **lowerCamelCase: List[Any]):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self: List[str]) -> int:
        return self

    def __exit__(self: Dict, lowerCamelCase: Any, lowerCamelCase: List[Any], lowerCamelCase: str) -> Dict:
        # NOTE(review): three parameters share one name (SyntaxError).
        return


# Global on/off switch for progress bars (presumably once ``_tqdm_active``).
_snake_case : Optional[Any] = True


class a:
    """Factory returning a real ``tqdm`` or the no-op stand-in.

    NOTE(review): shadows the stand-in class, which was also named ``a``.
    """

    def __call__(self: str, *lowerCamelCase: Optional[Any], lowerCamelCase: Tuple = False, **lowerCamelCase: Union[str, Any]) -> Optional[Any]:
        # NOTE(review): the keyword flag was presumably ``disable`` —
        # referenced below but absent from the (duplicate-name) signature;
        # ``_tqdm_active`` and ``EmptyTqdm`` are likewise lost names.
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*lowerCamelCase, **lowerCamelCase)
        else:
            return EmptyTqdm(*lowerCamelCase, **lowerCamelCase)

    def __snake_case(self: str, *lowerCamelCase: str, **lowerCamelCase: Tuple) -> str:
        # set_lock passthrough; the ``None`` bound to a local is dead code.
        __snake_case : Optional[Any] = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*lowerCamelCase, **lowerCamelCase)

    def __snake_case(self: List[Any]) -> List[Any]:
        # get_lock passthrough.  NOTE(review): shadows the set_lock method
        # above — both are named ``__snake_case``.
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


# Singleton factory instance.  NOTE(review): ``_tqdm_cls`` is undefined —
# the factory class is named ``a``.
_snake_case : int = _tqdm_cls()


def lowerCAmelCase_():
    # is_progress_bar_enabled()
    global _tqdm_active
    return bool(_tqdm_active)


def lowerCAmelCase_():
    # enable_progress_bar().  NOTE(review): binds a throwaway local instead
    # of the declared-global ``_tqdm_active``.
    global _tqdm_active
    __snake_case : Tuple = True


def lowerCAmelCase_():
    # disable_progress_bar(); same breakage as above.
    global _tqdm_active
    __snake_case : List[Any] = False
203
1
import unittest from parameterized import parameterized from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXModel, ) class lowerCamelCase_ : '''simple docstring''' def __init__( self : int , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str]=13 , _lowerCAmelCase : List[str]=7 , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Optional[int]=99 , _lowerCAmelCase : int=64 , _lowerCAmelCase : str=5 , _lowerCAmelCase : Optional[Any]=4 , _lowerCAmelCase : Tuple=37 , _lowerCAmelCase : Any="gelu" , _lowerCAmelCase : Optional[Any]=0.1 , _lowerCAmelCase : str=0.1 , _lowerCAmelCase : str=512 , _lowerCAmelCase : List[str]=16 , _lowerCAmelCase : Union[str, Any]=2 , _lowerCAmelCase : Dict=0.02 , _lowerCAmelCase : List[Any]=3 , _lowerCAmelCase : List[str]=4 , _lowerCAmelCase : str=None , ): SCREAMING_SNAKE_CASE_ = parent SCREAMING_SNAKE_CASE_ = batch_size SCREAMING_SNAKE_CASE_ = seq_length SCREAMING_SNAKE_CASE_ = is_training SCREAMING_SNAKE_CASE_ = use_input_mask SCREAMING_SNAKE_CASE_ = use_token_type_ids SCREAMING_SNAKE_CASE_ = use_labels SCREAMING_SNAKE_CASE_ = vocab_size SCREAMING_SNAKE_CASE_ = hidden_size SCREAMING_SNAKE_CASE_ = num_hidden_layers SCREAMING_SNAKE_CASE_ = num_attention_heads SCREAMING_SNAKE_CASE_ = intermediate_size SCREAMING_SNAKE_CASE_ = hidden_act SCREAMING_SNAKE_CASE_ = hidden_dropout_prob 
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ = max_position_embeddings SCREAMING_SNAKE_CASE_ = type_vocab_size SCREAMING_SNAKE_CASE_ = type_sequence_label_size SCREAMING_SNAKE_CASE_ = initializer_range SCREAMING_SNAKE_CASE_ = num_labels SCREAMING_SNAKE_CASE_ = num_choices SCREAMING_SNAKE_CASE_ = scope SCREAMING_SNAKE_CASE_ = vocab_size - 1 def lowerCAmelCase_ ( self : Any ): SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE_ = None if self.use_input_mask: SCREAMING_SNAKE_CASE_ = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE_ = None if self.use_labels: SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE_ = self.get_config() return config, input_ids, input_mask, token_labels def lowerCAmelCase_ ( self : Tuple ): return GPTNeoXConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , ) def lowerCAmelCase_ ( self : str ): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ = True return config, input_ids, input_mask, token_labels def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict ): SCREAMING_SNAKE_CASE_ = GPTNeoXModel(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , 
attention_mask=_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Tuple ): SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = GPTNeoXModel(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : Dict , _lowerCAmelCase : Any ): SCREAMING_SNAKE_CASE_ = GPTNeoXForCausalLM(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int ): SCREAMING_SNAKE_CASE_ = self.num_labels SCREAMING_SNAKE_CASE_ = GPTNeoXForQuestionAnswering(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase_ ( self : int , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple ): SCREAMING_SNAKE_CASE_ = self.num_labels SCREAMING_SNAKE_CASE_ = GPTNeoXForSequenceClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() SCREAMING_SNAKE_CASE_ = 
ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : int , _lowerCAmelCase : List[str] ): SCREAMING_SNAKE_CASE_ = self.num_labels SCREAMING_SNAKE_CASE_ = GPTNeoXForTokenClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase_ ( self : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Dict ): SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = GPTNeoXForCausalLM(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() # first forward pass SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , use_cache=_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids SCREAMING_SNAKE_CASE_ = ids_tensor((self.batch_size, 3) , config.vocab_size ) SCREAMING_SNAKE_CASE_ = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and SCREAMING_SNAKE_CASE_ = torch.cat([input_ids, next_tokens] , dim=-1 ) SCREAMING_SNAKE_CASE_ = torch.cat([input_mask, next_mask] , dim=-1 ) SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_hidden_states=_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = output_from_no_past['hidden_states'][0] SCREAMING_SNAKE_CASE_ = model( _lowerCAmelCase , attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , output_hidden_states=_lowerCAmelCase , )['hidden_states'][0] # select random slice 
SCREAMING_SNAKE_CASE_ = ids_tensor((1,) , output_from_past.shape[-1] ).item() SCREAMING_SNAKE_CASE_ = output_from_no_past[:, -3:, random_slice_idx].detach() SCREAMING_SNAKE_CASE_ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) ) def lowerCAmelCase_ ( self : int ): SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = config_and_inputs SCREAMING_SNAKE_CASE_ = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' lowercase_ = ( ( GPTNeoXModel, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, ) if is_torch_available() else () ) lowercase_ = (GPTNeoXForCausalLM,) if is_torch_available() else () lowercase_ = ( { "feature-extraction": GPTNeoXModel, "question-answering": GPTNeoXForQuestionAnswering, "text-classification": GPTNeoXForSequenceClassification, "text-generation": GPTNeoXForCausalLM, "token-classification": GPTNeoXForTokenClassification, "zero-shot": GPTNeoXForSequenceClassification, } if is_torch_available() else {} ) lowercase_ = False lowercase_ = False lowercase_ = False lowercase_ = False def lowerCAmelCase_ ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE_ = GPTNeoXModelTester(self ) SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=64 , num_attention_heads=8 ) def lowerCAmelCase_ ( self : Any ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : int ): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) def lowerCAmelCase_ ( self : Dict ): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) def lowerCAmelCase_ ( self : Dict ): # This regression test was failing with PyTorch < 1.3 SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_decoder() SCREAMING_SNAKE_CASE_ = None self.model_tester.create_and_check_model_as_decoder(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) def lowerCAmelCase_ ( self : int ): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*_lowerCAmelCase ) def lowerCAmelCase_ ( self : int ): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_lowerCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_lowerCAmelCase ) def lowerCAmelCase_ ( self : List[str] ): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_lowerCAmelCase ) @unittest.skip(reason='Feed forward chunking is not implemented' ) def lowerCAmelCase_ ( self : str ): pass 
@parameterized.expand([('linear',), ('dynamic',)] ) def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : Dict ): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE_ = ids_tensor([1, 10] , config.vocab_size ) SCREAMING_SNAKE_CASE_ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights SCREAMING_SNAKE_CASE_ = GPTNeoXModel(_lowerCAmelCase ) original_model.to(_lowerCAmelCase ) original_model.eval() SCREAMING_SNAKE_CASE_ = original_model(_lowerCAmelCase ).last_hidden_state SCREAMING_SNAKE_CASE_ = original_model(_lowerCAmelCase ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights SCREAMING_SNAKE_CASE_ = {'type': scaling_type, 'factor': 10.0} SCREAMING_SNAKE_CASE_ = GPTNeoXModel(_lowerCAmelCase ) scaled_model.to(_lowerCAmelCase ) scaled_model.eval() SCREAMING_SNAKE_CASE_ = scaled_model(_lowerCAmelCase ).last_hidden_state SCREAMING_SNAKE_CASE_ = scaled_model(_lowerCAmelCase ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. 
if scaling_type == "dynamic": self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-5 ) ) else: self.assertFalse(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-5 ) ) @require_torch class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def lowerCAmelCase_ ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' ) for checkpointing in [True, False]: SCREAMING_SNAKE_CASE_ = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' ) if checkpointing: model.gradient_checkpointing_enable() else: model.gradient_checkpointing_disable() model.to(_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = tokenizer('My favorite food is' , return_tensors='pt' ).to(_lowerCAmelCase ) # The hub repo. is updated on 2023-04-04, resulting in poor outputs. # See: https://github.com/huggingface/transformers/pull/24193 SCREAMING_SNAKE_CASE_ = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure' SCREAMING_SNAKE_CASE_ = model.generate(**_lowerCAmelCase , do_sample=_lowerCAmelCase , max_new_tokens=20 ) SCREAMING_SNAKE_CASE_ = tokenizer.batch_decode(_lowerCAmelCase )[0] self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
31
'''simple docstring'''


def UpperCamelCase__(lowerCAmelCase):
    """Return the sum of the proper divisors of a positive integer.

    Args:
        lowerCAmelCase: the number whose proper divisors are summed.

    Returns:
        The sum of all divisors of the input strictly smaller than it
        (0 for the input 1, which has no proper divisors).

    Raises:
        ValueError: if the input is not an ``int`` or is not positive.

    >>> UpperCamelCase__(6)
    6
    >>> UpperCamelCase__(12)
    16
    """
    # Bug fix: the original called isinstance(x, x) — the second argument must
    # be a type, so every call raised TypeError — and the body referenced the
    # undefined name ``input_num`` instead of the parameter.
    if not isinstance(lowerCAmelCase, int):
        raise ValueError("""Input must be an integer""")
    if lowerCAmelCase <= 0:
        raise ValueError("""Input must be positive""")
    # A proper divisor of n is at most n // 2, so half the range suffices.
    return sum(
        divisor
        for divisor in range(1, lowerCAmelCase // 2 + 1)
        if lowerCAmelCase % divisor == 0
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
207
0
def A(snake_case__: int) -> bool:
    """Lucas–Lehmer primality test for the Mersenne number ``2**p - 1``.

    Args:
        snake_case__: the exponent ``p``; must be at least 2.  The test is
            only meaningful when ``p`` itself is prime.

    Returns:
        True iff ``2**p - 1`` is a Mersenne prime.

    Raises:
        ValueError: if ``p`` is less than 2.

    >>> A(7)
    True
    >>> A(11)
    False
    """
    if snake_case__ < 2:
        raise ValueError('p should not be less than 2!')
    elif snake_case__ == 2:
        # 2**2 - 1 == 3 is prime; the s-recurrence below starts at p == 3.
        return True

    # Bug fix: the original bound both values to one throwaway name and then
    # read the undefined names ``s`` and ``m``, raising NameError on any call
    # with p > 2.
    s = 4  # s_0 of the Lucas–Lehmer sequence
    m = (1 << snake_case__) - 1  # the Mersenne number 2**p - 1
    for _ in range(snake_case__ - 2):
        s = ((s * s) - 2) % m
    # 2**p - 1 is prime iff s_{p-2} ≡ 0 (mod 2**p - 1).
    return s == 0


if __name__ == "__main__":
    # Bug fix: the original printed ``lucas_lehmer_test(...)``, an undefined
    # name — the function in this module is ``A``.
    print(A(7))
    print(A(11))
676
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Count ordered combinations of items from *array* summing to *target*.

    Naive exponential recursion (order matters, items reusable).

    Args:
        n: length of ``array`` (kept for interface parity; unused here).
        array: positive integers that may be reused any number of times.
        target: the sum to reach.

    Returns:
        The number of ordered sequences of items summing to ``target``.

    >>> combination_sum_iv(3, [1, 2, 5], 5)
    9
    """
    # Bug fix: the original defined all three functions under the single name
    # ``A`` and repeated one parameter name three times per signature —
    # duplicate argument names are a SyntaxError, so the module could not be
    # imported.  Parameter names are restored to match the ``__main__`` call.
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Same count as :func:`combination_sum_iv`, memoized top-down.

    >>> combination_sum_iv_dp_array(3, [1, 2, 5], 5)
    9
    """
    def count_of_possible_combinations_with_dp_array(
        target: int, dp_array: list[int]
    ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:  # already computed
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)  # -1 marks "not yet computed"
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Same count, iterative bottom-up DP in O(target * n).

    >>> combination_sum_iv_bottom_up(3, [1, 2, 5], 5)
    9
    """
    dp_array = [0] * (target + 1)
    dp_array[0] = 1  # one way to reach sum 0: the empty sequence
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]


# Backward compatibility: in the mangled module all three functions were bound
# to the single name ``A``; the last definition (bottom-up) won.
A = combination_sum_iv_bottom_up


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Bug fix: these three variables were all assigned to the lost name
    # ``UpperCAmelCase__`` while the print below read n/array/target.
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
676
1
# NOTE(review): machine-mangled copy of a Prim's-MST implementation.
# The obfuscation collapsed identifiers: every attribute assignment in the
# vertex class binds a throwaway local ``_UpperCamelCase`` instead of
# ``self.<attr>``; the three graph helpers (connect / prim / prim-with-heap)
# all share the name ``lowerCAmelCase__`` (each def shadows the previous);
# several signatures repeat one parameter name, which is a SyntaxError, so
# this module cannot be imported as written.  Code is left byte-identical;
# comments flag the individual breakages.
import heapq as hq
import math
from collections.abc import Iterator


class _UpperCAmelCase:
    '''Graph vertex: id, Prim key/parent, adjacency list and edge weights.'''

    def __init__(self: Dict, lowercase_: Optional[Any]) -> List[Any]:
        """NOTE(review): ``id_`` is undefined (the parameter is
        ``lowercase_``) and every line binds the local ``_UpperCamelCase`` —
        presumably these once set self.id / self.key / self.pi /
        self.neighbors / self.edges."""
        _UpperCamelCase = str(id_)
        _UpperCamelCase = None
        _UpperCamelCase = None
        _UpperCamelCase = []
        _UpperCamelCase = {}  # {vertex:distance}

    def __lt__(self: List[str], lowercase_: Union[str, Any]) -> Optional[int]:
        """Order vertices by Prim key.  NOTE(review): ``other`` is undefined —
        the parameter is ``lowercase_``."""
        return self.key < other.key

    def __repr__(self: List[Any]) -> Tuple:
        """A vertex prints as its id."""
        return self.id

    def __UpperCAmelCase(self: List[Any], lowercase_: int) -> Union[str, Any]:
        """add_neighbor: append a vertex to the adjacency list."""
        self.neighbors.append(lowercase_)

    def __UpperCAmelCase(self: Optional[Any], lowercase_: int, lowercase_: List[Any]) -> List[Any]:
        """add_edge: record an edge weight.  NOTE(review): duplicate parameter
        name (SyntaxError); this def shadows add_neighbor above; the body
        binds a local and reads the undefined name ``weight`` instead of
        writing ``self.edges[...]``."""
        _UpperCamelCase = weight


def lowerCAmelCase__(a__, a__, a__, a__) -> str:
    '''connect(graph, a, b, edge): wire an undirected weighted edge between
    vertices a and b (1-based ids).  NOTE(review): four parameters share the
    name ``a__`` (SyntaxError) and the body references the lost names
    ``graph`` / ``a`` / ``b``.'''
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], a__)
    graph[b - 1].add_edge(graph[a - 1], a__)


def lowerCAmelCase__(a__, a__) -> list:
    '''prim(graph, root): list-based Prim, returning the MST as a list of
    1-based (child, parent) id pairs.  NOTE(review): duplicate parameter name
    (SyntaxError); most assignments bind the throwaway local
    ``_UpperCamelCase`` instead of u.key / u.pi / root.key / q / v.pi /
    v.key, and ``q`` / ``u`` / ``graph`` / ``a`` are lost names.'''
    _UpperCamelCase = []
    for u in graph:
        _UpperCamelCase = math.inf
        _UpperCamelCase = None
    _UpperCamelCase = 0
    _UpperCamelCase = graph[:]
    while q:
        _UpperCamelCase = min(a__)
        q.remove(a__)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                _UpperCamelCase = u
                _UpperCamelCase = u.edges[v.id]
    for i in range(1, len(a__)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def lowerCAmelCase__(a__, a__) -> Iterator[tuple]:
    '''prim_heap(graph, root): heap-based Prim, yielding MST edges lazily as
    1-based (child, parent) id pairs.  NOTE(review): same duplicate-name and
    lost-name breakages as the list version; also shadows it.'''
    for u in graph:
        _UpperCamelCase = math.inf
        _UpperCamelCase = None
    _UpperCamelCase = 0
    _UpperCamelCase = list(a__)
    hq.heapify(a__)
    while h:
        _UpperCamelCase = hq.heappop(a__)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                _UpperCamelCase = u
                _UpperCamelCase = u.edges[v.id]
        hq.heapify(a__)
    for i in range(1, len(a__)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def lowerCAmelCase__() -> None:
    '''Doctest placeholder (originally a test function); intentionally
    empty.'''


if __name__ == "__main__":
    import doctest

    doctest.testmod()
547
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase__ = { '''configuration_longformer''': [ '''LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongformerConfig''', '''LongformerOnnxConfig''', ], '''tokenization_longformer''': ['''LongformerTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ['''LongformerTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ '''LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LongformerForMaskedLM''', '''LongformerForMultipleChoice''', '''LongformerForQuestionAnswering''', '''LongformerForSequenceClassification''', '''LongformerForTokenClassification''', '''LongformerModel''', '''LongformerPreTrainedModel''', '''LongformerSelfAttention''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ '''TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFLongformerForMaskedLM''', '''TFLongformerForMultipleChoice''', '''TFLongformerForQuestionAnswering''', '''TFLongformerForSequenceClassification''', '''TFLongformerForTokenClassification''', '''TFLongformerModel''', '''TFLongformerPreTrainedModel''', '''TFLongformerSelfAttention''', ] if TYPE_CHECKING: from .configuration_longformer import ( LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig, LongformerOnnxConfig, ) from .tokenization_longformer import LongformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_longformer_fast import LongformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except 
OptionalDependencyNotAvailable: pass else: from .modeling_longformer import ( LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, LongformerForMaskedLM, LongformerForMultipleChoice, LongformerForQuestionAnswering, LongformerForSequenceClassification, LongformerForTokenClassification, LongformerModel, LongformerPreTrainedModel, LongformerSelfAttention, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_longformer import ( TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFLongformerForMaskedLM, TFLongformerForMultipleChoice, TFLongformerForQuestionAnswering, TFLongformerForSequenceClassification, TFLongformerForTokenClassification, TFLongformerModel, TFLongformerPreTrainedModel, TFLongformerSelfAttention, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
547
1
'''simple docstring''' import json import os import shutil import warnings from argparse import ArgumentParser, Namespace from pathlib import Path from typing import List from ..utils import logging from . import BaseTransformersCLICommand try: from cookiecutter.main import cookiecutter a__ : str = True except ImportError: a__ : str = False a__ : int = logging.get_logger(__name__) # pylint: disable=invalid-name def __snake_case ( SCREAMING_SNAKE_CASE_ : str ) -> Union[str, Any]: """simple docstring""" return AddNewModelCommand(args.testing , args.testing_file , path=args.path ) class lowerCAmelCase__ ( __UpperCAmelCase ): '''simple docstring''' @staticmethod def __snake_case ( a__ : ArgumentParser ): UpperCAmelCase = parser.add_parser('''add-new-model''' ) add_new_model_parser.add_argument('''--testing''' , action='''store_true''' , help='''If in testing mode.''' ) add_new_model_parser.add_argument('''--testing_file''' , type=__SCREAMING_SNAKE_CASE , help='''Configuration file on which to run.''' ) add_new_model_parser.add_argument( '''--path''' , type=__SCREAMING_SNAKE_CASE , help='''Path to cookiecutter. Should only be used for testing purposes.''' ) add_new_model_parser.set_defaults(func=__SCREAMING_SNAKE_CASE ) def __init__( self : Any , a__ : bool , a__ : str , a__ : int=None , *a__ : List[Any] ): UpperCAmelCase = testing UpperCAmelCase = testing_file UpperCAmelCase = path def __snake_case ( self : List[str] ): warnings.warn( '''The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. ''' '''It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality ''' '''checks, you should use `transformers-cli add-new-model-like` instead.''' ) if not _has_cookiecutter: raise ImportError( '''Model creation dependencies are required to use the `add_new_model` command. 
Install them by running ''' '''the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n''' ) # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory UpperCAmelCase = [directory for directory in os.listdir() if '''cookiecutter-template-''' == directory[:22]] if len(__SCREAMING_SNAKE_CASE ) > 0: raise ValueError( '''Several directories starting with `cookiecutter-template-` in current working directory. ''' '''Please clean your directory by removing all folders starting with `cookiecutter-template-` or ''' '''change your working directory.''' ) UpperCAmelCase = ( Path(__SCREAMING_SNAKE_CASE ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent ) UpperCAmelCase = path_to_transformer_root / '''templates''' / '''adding_a_new_model''' # Execute cookiecutter if not self._testing: cookiecutter(str(__SCREAMING_SNAKE_CASE ) ) else: with open(self._testing_file , '''r''' ) as configuration_file: UpperCAmelCase = json.load(__SCREAMING_SNAKE_CASE ) cookiecutter( str(path_to_cookiecutter if self._path is None else self._path ) , no_input=__SCREAMING_SNAKE_CASE , extra_context=__SCREAMING_SNAKE_CASE , ) UpperCAmelCase = [directory for directory in os.listdir() if '''cookiecutter-template-''' in directory[:22]][0] # Retrieve configuration with open(directory + '''/configuration.json''' , '''r''' ) as configuration_file: UpperCAmelCase = json.load(__SCREAMING_SNAKE_CASE ) UpperCAmelCase = configuration['''lowercase_modelname'''] UpperCAmelCase = configuration['''generate_tensorflow_pytorch_and_flax'''] os.remove(f"{directory}/configuration.json" ) UpperCAmelCase = '''PyTorch''' in generate_tensorflow_pytorch_and_flax UpperCAmelCase = '''TensorFlow''' in generate_tensorflow_pytorch_and_flax UpperCAmelCase = '''Flax''' in generate_tensorflow_pytorch_and_flax UpperCAmelCase = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}" 
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE ) os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}" , exist_ok=__SCREAMING_SNAKE_CASE ) # Tests require submodules as they have parent imports with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py" , '''w''' ): pass shutil.move( f"{directory}/__init__.py" , f"{model_dir}/__init__.py" , ) shutil.move( f"{directory}/configuration_{lowercase_model_name}.py" , f"{model_dir}/configuration_{lowercase_model_name}.py" , ) def remove_copy_lines(a__ : List[str] ): with open(__SCREAMING_SNAKE_CASE , '''r''' ) as f: UpperCAmelCase = f.readlines() with open(__SCREAMING_SNAKE_CASE , '''w''' ) as f: for line in lines: if "# Copied from transformers." not in line: f.write(__SCREAMING_SNAKE_CASE ) if output_pytorch: if not self._testing: remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py" ) shutil.move( f"{directory}/modeling_{lowercase_model_name}.py" , f"{model_dir}/modeling_{lowercase_model_name}.py" , ) shutil.move( f"{directory}/test_modeling_{lowercase_model_name}.py" , f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py" , ) else: os.remove(f"{directory}/modeling_{lowercase_model_name}.py" ) os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py" ) if output_tensorflow: if not self._testing: remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py" ) shutil.move( f"{directory}/modeling_tf_{lowercase_model_name}.py" , f"{model_dir}/modeling_tf_{lowercase_model_name}.py" , ) shutil.move( f"{directory}/test_modeling_tf_{lowercase_model_name}.py" , f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py" , ) else: os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py" ) os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py" ) if output_flax: if not self._testing: 
remove_copy_lines(f"{directory}/modeling_flax_{lowercase_model_name}.py" ) shutil.move( f"{directory}/modeling_flax_{lowercase_model_name}.py" , f"{model_dir}/modeling_flax_{lowercase_model_name}.py" , ) shutil.move( f"{directory}/test_modeling_flax_{lowercase_model_name}.py" , f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py" , ) else: os.remove(f"{directory}/modeling_flax_{lowercase_model_name}.py" ) os.remove(f"{directory}/test_modeling_flax_{lowercase_model_name}.py" ) shutil.move( f"{directory}/{lowercase_model_name}.md" , f"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md" , ) shutil.move( f"{directory}/tokenization_{lowercase_model_name}.py" , f"{model_dir}/tokenization_{lowercase_model_name}.py" , ) shutil.move( f"{directory}/tokenization_fast_{lowercase_model_name}.py" , f"{model_dir}/tokenization_{lowercase_model_name}_fast.py" , ) from os import fdopen, remove from shutil import copymode, move from tempfile import mkstemp def replace(a__ : str , a__ : str , a__ : List[str] ): # Create temp file UpperCAmelCase, UpperCAmelCase = mkstemp() UpperCAmelCase = False with fdopen(__SCREAMING_SNAKE_CASE , '''w''' ) as new_file: with open(__SCREAMING_SNAKE_CASE ) as old_file: for line in old_file: new_file.write(__SCREAMING_SNAKE_CASE ) if line_to_copy_below in line: UpperCAmelCase = True for line_to_copy in lines_to_copy: new_file.write(__SCREAMING_SNAKE_CASE ) if not line_found: raise ValueError(f"Line {line_to_copy_below} was not found in file." 
) # Copy the file permissions from the old file to the new file copymode(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Remove original file remove(__SCREAMING_SNAKE_CASE ) # Move new file move(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def skip_units(a__ : Union[str, Any] ): return ( ("generating PyTorch" in line and not output_pytorch) or ("generating TensorFlow" in line and not output_tensorflow) or ("generating Flax" in line and not output_flax) ) def replace_in_files(a__ : str ): with open(__SCREAMING_SNAKE_CASE ) as datafile: UpperCAmelCase = [] UpperCAmelCase = False UpperCAmelCase = False for line in datafile: if "# To replace in: " in line and "##" not in line: UpperCAmelCase = line.split('''"''' )[1] UpperCAmelCase = skip_units(__SCREAMING_SNAKE_CASE ) elif "# Below: " in line and "##" not in line: UpperCAmelCase = line.split('''"''' )[1] UpperCAmelCase = skip_units(__SCREAMING_SNAKE_CASE ) elif "# End." in line and "##" not in line: if not skip_file and not skip_snippet: replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) UpperCAmelCase = [] elif "# Replace with" in line and "##" not in line: UpperCAmelCase = [] elif "##" not in line: lines_to_copy.append(__SCREAMING_SNAKE_CASE ) remove(__SCREAMING_SNAKE_CASE ) replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py" ) os.rmdir(__SCREAMING_SNAKE_CASE )
716
"""Fast (Rust-backed) tokenizer for FNet, built on a SentencePiece vocabulary.

NOTE(review): identifiers in this file have been machine-mangled. Several
module constants are all bound to ``a__`` (only the last binding survives),
the class attributes reference ``VOCAB_FILES_NAMES``/``PRETRAINED_*`` names
that are never defined under those names, and ``__init__`` declares every
parameter as ``a__`` (duplicate argument names — a SyntaxError). Comments
below describe the evident original intent; confirm against the upstream
``tokenization_fnet_fast.py`` before relying on them.
"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_fnet import FNetTokenizer
else:
    # slow-tokenizer class is unavailable without sentencepiece
    a__ : Optional[Any] = None

a__ : Any = logging.get_logger(__name__)
# presumably VOCAB_FILES_NAMES — TODO confirm original binding
a__ : Union[str, Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

# presumably PRETRAINED_VOCAB_FILES_MAP — TODO confirm original binding
a__ : Dict = {
    'vocab_file': {
        'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model',
        'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model',
    },
    'tokenizer_file': {
        'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json',
        'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json',
    },
}

# presumably PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES — TODO confirm
a__ : Optional[Any] = {
    'google/fnet-base': 512,
    'google/fnet-large': 512,
}

# SentencePiece word-start marker
a__ : int = '▁'


class lowerCAmelCase__ ( UpperCAmelCase_ ):
    """FNet fast tokenizer (wraps a tokenizers.Tokenizer); mirrors the slow
    ``FNetTokenizer`` interface and produces ``input_ids`` + ``token_type_ids``.
    """

    _lowerCamelCase =VOCAB_FILES_NAMES
    _lowerCamelCase =PRETRAINED_VOCAB_FILES_MAP
    _lowerCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowerCamelCase =["input_ids", "token_type_ids"]
    _lowerCamelCase =FNetTokenizer

    # NOTE(review): every parameter below is named ``a__`` — duplicate argument
    # names make this ``def`` a SyntaxError; the body references the original
    # names (mask_token, do_lower_case, …) that the mangler erased.
    def __init__( self : List[Any] , a__ : Optional[Any]=None , a__ : Optional[int]=None , a__ : List[str]=False , a__ : Tuple=True , a__ : int=True , a__ : Optional[Any]="<unk>" , a__ : Union[str, Any]="[SEP]" , a__ : int="<pad>" , a__ : Dict="[CLS]" , a__ : int="[MASK]" , **a__ : Dict , ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        UpperCAmelCase = (
            AddedToken(a__ , lstrip=a__ , rstrip=a__ , normalized=a__ ) if isinstance(a__ , a__ ) else mask_token
        )
        super().__init__(
            a__ , tokenizer_file=a__ , do_lower_case=a__ , remove_space=a__ , keep_accents=a__ , unk_token=a__ , sep_token=a__ , pad_token=a__ , cls_token=a__ , mask_token=a__ , **a__ , )
        # originally: self.do_lower_case / self.remove_space / self.keep_accents /
        # self.vocab_file assignments — TODO confirm against upstream
        UpperCAmelCase = do_lower_case
        UpperCAmelCase = remove_space
        UpperCAmelCase = keep_accents
        UpperCAmelCase = vocab_file
        # can_save_slow_tokenizer: True only when a sentencepiece file is present
        UpperCAmelCase = False if not self.vocab_file else True

    # build_inputs_with_special_tokens: [CLS] A [SEP] (+ B [SEP] for pairs)
    # NOTE(review): the three methods below are all named ``__snake_case`` —
    # in real Python only the last binding would survive on the class.
    def __snake_case ( self : List[Any] , a__ : List[int] , a__ : Optional[List[int]] = None ):
        UpperCAmelCase = [self.sep_token_id]
        UpperCAmelCase = [self.cls_token_id]
        if token_ids_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a + sep

    # create_token_type_ids_from_sequences: 0s for segment A, 1s for segment B
    def __snake_case ( self : List[str] , a__ : List[int] , a__ : Optional[List[int]] = None ):
        UpperCAmelCase = [self.sep_token_id]
        UpperCAmelCase = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    # save_vocabulary: copy the sentencepiece model into ``save_directory``
    def __snake_case ( self : Optional[Any] , a__ : str , a__ : Optional[str] = None ):
        if not os.path.isdir(a__ ):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
            return
        UpperCAmelCase = os.path.join(
            a__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        # only copy when the target differs from the source file
        if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ):
            copyfile(self.vocab_file , a__ )
        return (out_vocab_file,)
570
0
"""Shannon-entropy demo: first- and second-order entropy of a text."""
from __future__ import annotations

import math
from collections import Counter
from string import ascii_lowercase


def calculate_prob(text: str) -> None:
    """Print the first-order entropy, the second-order entropy, and their
    difference for ``text`` (alphabet: space + lowercase ASCII letters).

    Args:
        text: the text to analyze; characters outside the alphabet are ignored.
    """
    # Fix: call sites referenced ``analyze_text``/``main`` but the defs had
    # been renamed ``lowercase__`` (NameError); names are restored here.
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # total number of single-character observations
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0.0
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            # entropy formula: sum of p * log2(p).
            # Fix: original called nonexistent ``math.loga`` — restored log2.
            my_fir_sum += prob * math.log2(prob)
    # print entropy (negated, rounded to an integer number of bits)
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two length strings
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0.0
    # Fix: original wrote ``for cha in …: for cha in …`` — the inner loop
    # shadowed the outer variable, so only doubled pairs ("aa", "bb", …)
    # were ever examined; use two distinct loop variables.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)
    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")
    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[dict, dict]:
    """Count single characters and adjacent two-character sequences.

    Returns:
        ``(single_char_strings, two_char_strings)`` — two Counters.
        A leading space is prepended conceptually, so ``" " + text[0]``
        is counted as the first pair; the last character is counted once
        in the singles so every character appears exactly once.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    # Fix: empty input previously raised IndexError on text[-1]
    if not text:
        return single_char_strings, two_char_strings
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main() -> None:
    """Run the module doctests (sample usage of ``calculate_prob`` is shown
    in the comment below)."""
    import doctest

    doctest.testmod()
    # Example:
    # text = "Had repulsive dashwoods suspicion sincerity but advantage now him. ..."
    # calculate_prob(text)


if __name__ == "__main__":
    main()
642
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCAmelCase = { '''configuration_informer''': [ '''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''InformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ '''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''InformerForPrediction''', '''InformerModel''', '''InformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_informer import ( INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, InformerForPrediction, InformerModel, InformerPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
642
1
'''simple docstring''' def lowerCamelCase__ ( _A = 1000 ): a : Union[str, Any] = 3 a : str = 0 while a < n: if a % 3 == 0 or a % 5 == 0: result += a elif a % 15 == 0: result -= a a += 1 return result if __name__ == "__main__": print(F"{solution() = }")
713
"""Unit tests for the ViTMAE model (PyTorch).

NOTE(review): this file is machine-mangled: all three classes are named
``a__``, every test method is ``lowercase_``, all locals are ``a``, and
parameter lists repeat ``__snake_case`` (duplicate argument names — a
SyntaxError as written).  Statements such as ``a , a , a : Any = …`` are
also invalid (annotated assignment with a tuple target).  Comments below
record the evident original structure; code is left byte-identical.
"""
import inspect
import math
import tempfile
import unittest

import numpy as np

from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTMAEForPreTraining, ViTMAEModel
    from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


# Originally ``ViTMAEModelTester``: builds small configs/inputs for the tests.
class a__:
    # NOTE(review): duplicate ``__snake_case`` parameters; body references the
    # original names (parent, batch_size, …) that the mangler erased.
    def __init__( self : List[str] , __snake_case : Optional[Any] , __snake_case : Any=13 , __snake_case : int=30 , __snake_case : Union[str, Any]=2 , __snake_case : Optional[int]=3 , __snake_case : Dict=True , __snake_case : List[str]=True , __snake_case : Any=32 , __snake_case : List[Any]=5 , __snake_case : List[Any]=4 , __snake_case : Optional[int]=37 , __snake_case : int="gelu" , __snake_case : Optional[Any]=0.1 , __snake_case : List[str]=0.1 , __snake_case : List[str]=10 , __snake_case : int=0.02 , __snake_case : Optional[int]=3 , __snake_case : Tuple=0.6 , __snake_case : Union[str, Any]=None , ):
        # originally ``self.<attr> = <arg>`` assignments — TODO confirm upstream
        a : List[str] = parent
        a : Tuple = batch_size
        a : Union[str, Any] = image_size
        a : List[str] = patch_size
        a : Optional[Any] = num_channels
        a : Optional[Any] = is_training
        a : List[Any] = use_labels
        a : Union[str, Any] = hidden_size
        a : Dict = num_hidden_layers
        a : Optional[Any] = num_attention_heads
        a : Optional[Any] = intermediate_size
        a : int = hidden_act
        a : Dict = hidden_dropout_prob
        a : Optional[Any] = attention_probs_dropout_prob
        a : Optional[Any] = type_sequence_label_size
        a : Optional[Any] = initializer_range
        a : Union[str, Any] = mask_ratio
        a : Optional[int] = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        a : Tuple = (image_size // patch_size) ** 2
        a : List[Any] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )

    # originally ``prepare_config_and_inputs``
    def lowercase_ ( self : List[str] ):
        a : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        a : Tuple = None
        if self.use_labels:
            a : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        a : Optional[int] = self.get_config()
        return config, pixel_values, labels

    # originally ``get_config``
    def lowercase_ ( self : str ):
        return ViTMAEConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__snake_case , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )

    # originally ``create_and_check_model``
    def lowercase_ ( self : Dict , __snake_case : Dict , __snake_case : List[Any] , __snake_case : Union[str, Any] ):
        a : Union[str, Any] = ViTMAEModel(config=__snake_case )
        model.to(__snake_case )
        model.eval()
        a : Optional[Any] = model(__snake_case )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    # originally ``create_and_check_for_pretraining`` (also covers greyscale)
    def lowercase_ ( self : Optional[Any] , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : Dict ):
        a : Dict = ViTMAEForPreTraining(__snake_case )
        model.to(__snake_case )
        model.eval()
        a : Dict = model(__snake_case )
        a : str = (self.image_size // self.patch_size) ** 2
        a : int = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
        # test greyscale images
        a : Any = 1
        a : Optional[int] = ViTMAEForPreTraining(__snake_case )
        model.to(__snake_case )
        model.eval()
        a : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        a : Optional[Any] = model(__snake_case )
        a : Dict = self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )

    # originally ``prepare_config_and_inputs_for_common``
    def lowercase_ ( self : Dict ):
        a : List[str] = self.prepare_config_and_inputs()
        a , a , a : Any = config_and_inputs
        a : Optional[Any] = {'pixel_values': pixel_values}
        return config, inputs_dict


# Originally ``ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase)``.
@require_torch
class a__( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
    # all_model_classes / pipeline mapping / common-test switches
    lowercase__ = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    lowercase__ = {"""feature-extraction""": ViTMAEModel} if is_torch_available() else {}
    lowercase__ = False
    lowercase__ = False
    lowercase__ = False
    lowercase__ = False

    # originally ``setUp`` — NOTE(review): ``ViTMAEModelTester`` is undefined
    # here because the tester class above was renamed ``a__``.
    def lowercase_ ( self : Tuple ):
        a : List[str] = ViTMAEModelTester(self )
        a : List[str] = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case , hidden_size=37 )

    # originally ``test_config``
    def lowercase_ ( self : List[str] ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='ViTMAE does not use inputs_embeds' )
    def lowercase_ ( self : List[str] ):
        pass

    # originally ``test_model_common_attributes``
    def lowercase_ ( self : Dict ):
        a , a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            a : Optional[Any] = model_class(__snake_case )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            a : Optional[int] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(__snake_case , nn.Linear ) )

    # originally ``test_forward_signature`` — first forward arg must be pixel_values
    def lowercase_ ( self : List[str] ):
        a , a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            a : Any = model_class(__snake_case )
            a : Any = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            a : int = [*signature.parameters.keys()]
            a : List[Any] = ['pixel_values']
            self.assertListEqual(arg_names[:1] , __snake_case )

    # originally ``test_model``
    def lowercase_ ( self : Any ):
        a : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__snake_case )

    # originally ``test_for_pretraining``
    def lowercase_ ( self : List[Any] ):
        a : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*__snake_case )

    # originally ``check_pt_tf_models`` override: feeds a fixed noise tensor so
    # PT and TF apply the same random mask
    def lowercase_ ( self : List[str] , __snake_case : Optional[Any] , __snake_case : Union[str, Any] , __snake_case : Tuple ):
        # make masks reproducible
        np.random.seed(2 )
        a : Union[str, Any] = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
        a : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        a : List[Any] = torch.from_numpy(__snake_case )
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        a : List[str] = pt_noise
        super().check_pt_tf_models(__snake_case , __snake_case , __snake_case )

    # originally ``test_save_load``: save/reload and compare outputs under a
    # reseeded RNG (the random mask would otherwise differ between runs)
    def lowercase_ ( self : Optional[int] ):
        a , a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            a : Any = model_class(__snake_case )
            model.to(__snake_case )
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2 )
            with torch.no_grad():
                a : Tuple = model(**self._prepare_for_class(__snake_case , __snake_case ) )
            a : List[str] = outputs[0].cpu().numpy()
            a : str = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(__snake_case )
                a : Dict = model_class.from_pretrained(__snake_case )
                model.to(__snake_case )
                # make random mask reproducible
                torch.manual_seed(2 )
                with torch.no_grad():
                    a : int = model(**self._prepare_for_class(__snake_case , __snake_case ) )
                # Make sure we don't have nans
                a : List[Any] = after_outputs[0].cpu().numpy()
                a : Any = 0
                a : Any = np.amax(np.abs(out_a - out_a ) )
                self.assertLessEqual(__snake_case , 1e-5 )

    @unittest.skip(
        reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
    def lowercase_ ( self : List[Any] ):
        pass

    @unittest.skip(
        reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
    def lowercase_ ( self : Any ):
        pass

    @unittest.skip(
        reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
    def lowercase_ ( self : int ):
        pass

    @unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
    def lowercase_ ( self : int ):
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def lowercase_ ( self : Any ):
        pass

    # originally ``test_model_from_pretrained`` (network access — @slow only)
    @slow
    def lowercase_ ( self : Dict ):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            a : str = ViTMAEModel.from_pretrained(__snake_case )
            self.assertIsNotNone(__snake_case )


# originally ``prepare_img``: loads the standard COCO test fixture image
def lowerCamelCase__ ( ):
    a : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image


# Originally ``ViTMAEModelIntegrationTest``: end-to-end check against
# reference logits from facebook/vit-mae-base.
@require_torch
@require_vision
class a__( unittest.TestCase ):
    @cached_property
    def lowercase_ ( self : Union[str, Any] ):
        return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None

    @slow
    def lowercase_ ( self : Union[str, Any] ):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2 )
        a : Union[str, Any] = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' ).to(__snake_case )
        a : str = self.default_image_processor
        a : Dict = prepare_img()
        a : Union[str, Any] = image_processor(images=__snake_case , return_tensors='pt' ).to(__snake_case )
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        a : Tuple = ViTMAEConfig()
        a : Optional[Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
        a : List[str] = np.random.uniform(size=(1, num_patches) )
        # forward pass
        with torch.no_grad():
            a : Tuple = model(**__snake_case , noise=torch.from_numpy(__snake_case ).to(device=__snake_case ) )
        # verify the logits
        a : int = torch.Size((1, 1_96, 7_68) )
        self.assertEqual(outputs.logits.shape , __snake_case )
        a : Optional[Any] = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(__snake_case ) , atol=1e-4 ) )
195
0
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE(review): identifiers in this file are machine-mangled — all three
# functions are named ``UpperCAmelCase`` (only the last binding would
# survive), every local is ``__A`` while later statements reference the
# original names (iam_client, role_name, …), and the ``Optional`` used in
# the return annotations is never imported.  Comments note the evident
# original intent; code is left byte-identical.
import json
import os

from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
    DYNAMO_BACKENDS,
    _ask_field,
    _ask_options,
    _convert_dynamo_backend,
    _convert_mixed_precision,
    _convert_sagemaker_distributed_mode,
    _convert_yes_no_to_bool,
)


if is_botoa_available():
    import botoa  # noqa: F401


# originally ``_create_iam_role_for_sagemaker``: create an IAM role with the
# SageMaker trust policy and attach a permission policy; tolerate the role
# already existing.
def UpperCAmelCase ( a_ ) -> Optional[int]:
    """Create an IAM role named ``a_`` for SageMaker training jobs (no-op if
    it already exists)."""
    __A = botoa.client("iam" )
    # trust policy: let sagemaker.amazonaws.com assume the role
    __A = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=a_ , AssumeRolePolicyDocument=json.dumps(a_ , indent=2 ) )
        # permission policy: SageMaker plus the ECR/CloudWatch/Logs/S3 actions
        # a training job needs
        __A = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": [
                        "sagemaker:*",
                        "ecr:GetDownloadUrlForLayer",
                        "ecr:BatchGetImage",
                        "ecr:BatchCheckLayerAvailability",
                        "ecr:GetAuthorizationToken",
                        "cloudwatch:PutMetricData",
                        "cloudwatch:GetMetricData",
                        "cloudwatch:GetMetricStatistics",
                        "cloudwatch:ListMetrics",
                        "logs:CreateLogGroup",
                        "logs:CreateLogStream",
                        "logs:DescribeLogStreams",
                        "logs:PutLogEvents",
                        "logs:GetLogEvents",
                        "s3:CreateBucket",
                        "s3:ListBucket",
                        "s3:GetBucketLocation",
                        "s3:GetObject",
                        "s3:PutObject",
                    ],
                    "Resource": "*",
                }
            ],
        }
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=a_ , PolicyName=F'''{role_name}_policy_permission''' , PolicyDocument=json.dumps(a_ , indent=2 ) , )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(F'''role {role_name} already exists. Using existing one''' )


# originally ``_get_iam_role_arn``: resolve a role name to its ARN
def UpperCAmelCase ( a_ ) -> Optional[int]:
    """Return the ARN of the IAM role named ``a_``."""
    __A = botoa.client("iam" )
    return iam_client.get_role(RoleName=a_ )["Role"]["Arn"]


# originally ``get_sagemaker_input``: interactive wizard that collects AWS
# credentials/region, IAM role, docker image, inputs/metrics files, dynamo
# options, instance type and mixed precision, and returns a SageMakerConfig.
def UpperCAmelCase ( ) -> Optional[int]:
    """Prompt the user for all Amazon SageMaker launch settings and build the
    corresponding ``SageMakerConfig``."""
    __A = _ask_options(
        "How do you want to authorize?" , ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "] , a_ , )
    __A = None
    if credentials_configuration == 0:
        # authorize via a named AWS profile
        __A = _ask_field("Enter your AWS Profile name: [default] " , default="default" )
        __A = aws_profile
    else:
        # authorize via explicit key pair (also needed at launch time)
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`"
        )
        __A = _ask_field("AWS Access Key ID: " )
        __A = aws_access_key_id
        __A = _ask_field("AWS Secret Access Key: " )
        __A = aws_secret_access_key
    __A = _ask_field("Enter your AWS Region: [us-east-1]" , default="us-east-1" )
    __A = aws_region
    __A = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?" , ["Provide IAM Role name", "Create new IAM role using credentials"] , a_ , )
    if role_management == 0:
        __A = _ask_field("Enter your IAM role name: " )
    else:
        __A = "accelerate_sagemaker_execution_role"
        print(F'''Accelerate will create an iam role "{iam_role_name}" using the provided credentials''' )
        _create_iam_role_for_sagemaker(a_ )
    __A = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: " , _convert_yes_no_to_bool , default=a_ , error_message="Please enter yes or no." , )
    __A = None
    if is_custom_docker_image:
        __A = _ask_field("Enter your Docker image: " , lambda a_ : str(a_ ).lower() )
    __A = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: " , _convert_yes_no_to_bool , default=a_ , error_message="Please enter yes or no." , )
    __A = None
    if is_sagemaker_inputs_enabled:
        __A = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): " , lambda a_ : str(a_ ).lower() , )
    __A = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: " , _convert_yes_no_to_bool , default=a_ , error_message="Please enter yes or no." , )
    __A = None
    if is_sagemaker_metrics_enabled:
        __A = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): " , lambda a_ : str(a_ ).lower() , )
    __A = _ask_options(
        "What is the distributed mode?" , ["No distributed training", "Data parallelism"] , _convert_sagemaker_distributed_mode , )
    __A = {}
    __A = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:" , _convert_yes_no_to_bool , default=a_ , error_message="Please enter yes or no." , )
    if use_dynamo:
        # collect torch.compile backend/mode/fullgraph/dynamic settings
        __A = "dynamo_"
        __A = _ask_options(
            "Which dynamo backend would you like to use?" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
        __A = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: " , _convert_yes_no_to_bool , default=a_ , error_message="Please enter yes or no." , )
        if use_custom_options:
            __A = _ask_options(
                "Which mode do you want to use?" , a_ , lambda a_ : TORCH_DYNAMO_MODES[int(a_ )] , default="default" , )
            __A = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: " , _convert_yes_no_to_bool , default=a_ , error_message="Please enter yes or no." , )
            __A = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: " , _convert_yes_no_to_bool , default=a_ , error_message="Please enter yes or no." , )
    __A = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        __A = _ask_options(
            a_ , a_ , lambda a_ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(a_ )] )
    else:
        eca_instance_query += "? [ml.p3.2xlarge]:"
        __A = _ask_field(a_ , lambda a_ : str(a_ ).lower() , default="ml.p3.2xlarge" )
    __A = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        __A = _ask_field(
            "How many machines do you want use? [1]: " , a_ , default=1 , )
    __A = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?" , ["no", "fp16", "bf16", "fp8"] , _convert_mixed_precision , )
    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
        )
    return SageMakerConfig(
        image_uri=a_ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=a_ , use_cpu=a_ , dynamo_config=a_ , eca_instance_type=a_ , profile=a_ , region=a_ , iam_role_name=a_ , mixed_precision=a_ , num_machines=a_ , sagemaker_inputs_file=a_ , sagemaker_metrics_file=a_ , )
55
import json
import unittest

import numpy as np
from huggingface_hub import hf_hub_download

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from transformers import OneFormerImageProcessor
    from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
    from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput

if is_vision_available():
    from PIL import Image


def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    """Download the class-info JSON from the hub and build the metadata dict.

    Returns a dict mapping each class id (as a string key) to its name, plus
    two extra entries: ``"thing_ids"`` (ids with ``isthing`` set) and
    ``"class_names"`` (all names, in file order).
    """
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata


class OneFormerImageProcessorTester(unittest.TestCase):
    """Holds the configuration used to build the image processor under test
    and helpers to compute expected shapes / fake model outputs."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_resize=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        num_labels=10,
        do_reduce_labels=False,
        ignore_index=255,
        repo_path="shi-labs/oneformer_demo",
        class_info_file="ade20k_panoptic.json",
        num_text=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path

        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index

    def prepare_image_processor_dict(self):
        """Keyword arguments used to instantiate ``OneFormerImageProcessor``."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "num_labels": self.num_labels,
            "do_reduce_labels": self.do_reduce_labels,
            "ignore_index": self.ignore_index,
            "class_info_file": self.class_info_file,
            "metadata": self.metadata,
            "num_text": self.num_text,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Expected (height, width) after shortest-edge resizing.

        For a batch, each image is resized independently and the batch is
        padded to the per-axis maxima.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width

    def get_fake_oneformer_outputs(self):
        """Random model output with the shapes the post-processors expect."""
        return OneFormerForUniversalSegmentationOutput(
            # +1 for null class
            class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)),
            masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width)),
        )


@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class

    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processing_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "ignore_index"))
        self.assertTrue(hasattr(image_processor, "class_info_file"))
        self.assertTrue(hasattr(image_processor, "num_text"))
        self.assertTrue(hasattr(image_processor, "repo_path"))
        self.assertTrue(hasattr(image_processor, "metadata"))
        self.assertTrue(hasattr(image_processor, "do_reduce_labels"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def comm_get_image_processor_inputs(
        self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np"
    ):
        """Build processor inputs, optionally with random segmentation maps
        (as numpy arrays or PIL images) and an instance->semantic id mapping."""
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels)) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
            annotations = [
                np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation) for annotation in annotations]

        inputs = image_processor(
            image_inputs,
            ["semantic"] * len(image_inputs),
            annotations,
            return_tensors="pt",
            instance_id_to_semantic_id=instance_id_to_semantic_id,
            pad_and_return_pixel_mask=True,
        )

        return inputs

    def test_init_without_params(self):
        pass

    def test_call_with_segmentation_maps(self):
        def common(is_instance_map=False, segmentation_type=None):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type
            )
            mask_labels = inputs["mask_labels"]
            class_labels = inputs["class_labels"]
            pixel_values = inputs["pixel_values"]
            text_inputs = inputs["text_inputs"]

            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
                self.assertEqual(mask_label.shape[0], class_label.shape[0])
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
                self.assertEqual(len(text_input), self.image_processing_tester.num_text)

        common()
        common(is_instance_map=True)
        common(is_instance_map=False, segmentation_type="pil")
        common(is_instance_map=True, segmentation_type="pil")

    def test_binary_mask_to_rle(self):
        fake_binary_mask = np.zeros((20, 50))
        fake_binary_mask[0, 20:] = 1
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :10] = 1

        rle = binary_mask_to_rle(fake_binary_mask)
        self.assertEqual(len(rle), 4)
        self.assertEqual(rle[0], 21)
        self.assertEqual(rle[1], 45)

    def test_post_process_semantic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_semantic_segmentation(outputs)

        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape,
            (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ),
        )

        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)
        self.assertEqual(segmentation[0].shape, target_sizes[0])

    def test_post_process_instance_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )

    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
108
0
"""Singly linked list with recursive reverse-order printing."""
from __future__ import annotations


class Node:
    """A single node of a singly linked list."""

    def __init__(self, data=None):
        self.data = data  # payload stored in this node
        self.next = None  # reference to the next node (None marks the tail)

    def __repr__(self):
        """Return the list from this node onward, e.g. '14->52->43'."""
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list: list) -> Node:
    """Create a linked list from ``elements_list`` and return its head node.

    Raises:
        Exception: if ``elements_list`` is empty.
    """
    if not elements_list:
        raise Exception("The Elements List is empty")
    # Keep `current` on the tail while `head` stays on the first node.
    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node: Node) -> None:
    """Print the list's elements in reverse order (recursive traversal)."""
    if head_node is not None and isinstance(head_node, Node):
        # Recurse to the tail first, then print on the way back up.
        print_reverse(head_node.next)
        print(head_node.data)


def main() -> None:
    """Demo: run the doctests, build a list, print it forward and reversed."""
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
506
"""Project Euler problem 47: find the first of N consecutive integers that
each have exactly N distinct prime factors (134043 for the default N=4)."""
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    """Return the set of distinct prime factors of ``n`` by trial division.

    >>> unique_prime_factors(100)
    {2, 5}
    """
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        # Whatever remains after dividing out all small factors is prime.
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of distinct prime factors of ``num``."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Return True when every element of ``iterable`` is equal (or it is empty)."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Return the first run of ``n`` consecutive integers that each have
    exactly ``n`` distinct prime factors."""
    base = 2
    while True:
        # Candidate window of n consecutive integers starting at `base`.
        group = [base + i for i in range(n)]
        # Factor counts of the window, with n appended so that equality()
        # also enforces "each count equals n", not just "all counts match".
        checker = [upf_len(x) for x in group]
        checker.append(n)
        if equality(checker):
            return group
        base += 1


def solution(n: int = 4) -> int:
    """Return the first element of the run found by ``run(n)``."""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
506
1
"""JSON/JSON-Lines dataset builder for the `datasets` library."""
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional

import pyarrow as pa
import pyarrow.json as paj

import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline


logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        # Honor the deprecated `block_size` by forwarding it to `chunksize`.
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)

                # We keep only the field we are interested in
                dataset = dataset[self.config.field]

                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
251
"""Convert lengths between metric units (meter through yottametre)."""

# Full unit names mapped to their SI symbol; symbols pass through unchanged.
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}


def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert ``value`` from ``from_type`` to ``to_type``.

    Unit names are lower-cased and a trailing/leading 's' is stripped, so
    plural spellings like "meters" are accepted; SI symbols work as-is.

    >>> length_conversion(4, "meter", "kilometer")
    0.004

    Raises:
        ValueError: if either unit name is unknown.
    """
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")
    # Map full names to symbols; unknown strings fall through and fail below.
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    # Shift by the difference of the power-of-ten exponents.
    exponent = from_exponent - to_exponent
    return value * pow(10, exponent)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
251
1
from typing import Dict, List, Optional, Union

import numpy as np

from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_valid_image,
    to_numpy_array,
    valid_images,
)
from ...utils import logging


if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    """Normalize `videos` into batch form: a list of videos, each a list of frames.

    Accepts a single image, a list of frames, or a list of videos, and always
    returns the doubly-nested form.

    Raises:
        ValueError: if `videos` is none of the accepted shapes.
    """
    # Already a batch of videos: list[list[frame]].
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    # A single video given as a list of frames.
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    # A single frame.
    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")


class VivitImageProcessor(BaseImageProcessor):
    """Video image processor.

    Per frame: optional resize (shortest edge or exact size), center crop,
    rescale (with an optional subtractive offset) and mean/std normalization.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize one frame.

        `size` holds either `shortest_edge` (aspect ratio preserved) or explicit
        `height`/`width`.
        """
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop one frame to `size["height"]` x `size["width"]`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale pixel values by `scale`, optionally shifting first."""
        # float32 so the fractional offset below isn't truncated on int input.
        image = image.astype(np.float32)
        if offset:
            # NOTE(review): shift preserved from the original implementation;
            # confirm the intended offset semantics against the model's
            # expected pixel range.
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize one frame: (image - mean) / std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        """Apply the configured transforms to a single frame."""
        # Parenthesized: the original `A and B or C` raised even when resizing
        # was disabled; `resample` only matters when `do_resize` is set.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Preprocess one or more videos into a `BatchFeature` of pixel values.

        Any argument left as None falls back to the value configured at
        construction time.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
151
from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # noqa F403 else: from 
.scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
151
1
"""Evaluate fully-parenthesized infix expressions with Dijkstra's two-stack algorithm."""

__author__ = "Alexander Joslin"

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate `equation`, a fully-parenthesized infix expression of
    single-digit operands, using Dijkstra's two-stack algorithm.

    Rules:
      1. operands are pushed on the operand stack;
      2. operators are pushed on the operator stack;
      3. left parentheses are ignored;
      4. a right parenthesis pops one operator and two operands, applies the
         operator, and pushes the result;
      5. the single value left on the operand stack is the answer.
    """
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()

            # num2 was pushed first, so it is the left-hand operand.
            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
92
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    """Placeholder object that raises a helpful ImportError when the optional
    `note_seq` backend is not installed (fix: the metaclass was an undefined
    name instead of the imported `DummyObject`)."""

    # Backends this object requires; checked by `requires_backends`.
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
332
0
"""Tests for the `PriorTransformer` model."""

import gc
import inspect
import unittest

import torch
from parameterized import parameterized

from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin


enable_full_determinism()


class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    # Attributes required by ModelTesterMixin.
    model_class = PriorTransformer
    main_input_name = "hidden_states"

    @property
    def dummy_input(self):
        """Random inputs for a tiny prior model."""
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def get_dummy_seed_input(self, seed=0):
        """Seeded random inputs for output-reproducibility tests."""
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    @property
    def input_shape(self):
        return (4, 8)

    @property
    def output_shape(self):
        return (4, 8)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        # Fix: the original unpacked the tuple into one throwaway name, then
        # used `model` and `loading_info`, which were never bound.
        model, loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True
        )
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]

        assert hidden_states is not None, "Make sure output is not None"

    def test_forward_signature(self):
        init_dict, _ = self.prepare_init_args_and_inputs_for_common()

        model = self.model_class(**init_dict)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]

        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names)

    def test_output_pretrained(self):
        model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
        model = model.to(torch_device)

        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()

        inputs = self.get_dummy_seed_input()
        with torch.no_grad():
            output = model(**inputs)[0]

        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))


@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        """Seeded random inputs at the full Kandinsky prior size."""
        torch.manual_seed(seed)
        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def tearDown(self):
        # Release GPU memory between slow tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
            [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
            # fmt: on
        ]
    )
    def test_kandinsky_prior(self, seed, expected_slice):
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior")
        model.to(torch_device)
        inputs = self.get_dummy_seed_input(seed=seed)

        with torch.no_grad():
            sample = model(**inputs)[0]

        assert list(sample.shape) == [1, 768]

        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
40
'''simple docstring''' from __future__ import annotations import numpy as np def __lowerCAmelCase (__lowerCAmelCase ): return np.maximum(0 , __lowerCAmelCase ) if __name__ == "__main__": print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
40
1
import platform
from argparse import ArgumentParser

import huggingface_hub

from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand


def info_command_factory(_):
    """Factory passed to argparse's `set_defaults(func=...)`."""
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    """`diffusers-cli env`: print environment/version info for bug reports."""

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        # Fix: the subparser was bound to a throwaway name while an undefined
        # `download_parser` was used, and the wrong object was passed as `func`.
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        """Collect versions of diffusers' optional dependencies and print them."""
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        """Render the info dict as a Markdown bullet list."""
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
511
from typing import Dict, List, Optional

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}


class CanineTokenizer(PreTrainedTokenizer):
    r"""
    CANINE tokenizer: a character splitter. Token ids are Unicode codepoints,
    so no vocabulary file is needed; special tokens live in the Private Use
    codepoint range.
    """

    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        # Wrap plain-string special tokens so they are never split/stripped.
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        # Character-level: every Unicode character is its own token.
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        # A token's id is its Unicode codepoint.
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        # Special codepoints render as their human-readable names.
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]`."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """1 marks a special token, 0 a sequence token."""
        if already_has_special_tokens:
            # Fix: the original repeated the `token_ids_0` keyword (SyntaxError)
            # and forwarded a mangled flag.
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Segment ids: 0 for the first sequence (with CLS/SEP), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        # CANINE has no vocabulary file to save.
        return ()
511
1
"""Processor combining a Pix2Struct image processor and a T5 tokenizer."""

from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class Pix2StructProcessor(ProcessorMixin):
    r"""
    Wraps `Pix2StructImageProcessor` and a T5 tokenizer into a single
    processor. Text targets are tokenized into `decoder_input_ids` /
    `decoder_attention_mask`; images become flattened patches.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        # Fix: `False` was assigned to a dead local; Pix2Struct's tokenizer
        # must not emit token type ids.
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images=None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        max_patches: Optional[int] = 2048,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Prepare images and/or text for the model.

        Raises:
            ValueError: if neither `images` nor `text` is given.
        """
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs
            )
        else:
            # add pixel_values and bbox; VQA renders the question into the image header
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs
            )

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )

            # Fix: the popped values were assigned to dead locals; they must be
            # re-inserted under the decoder-prefixed keys.
            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Deduplicated union of both components' input names, order-preserving.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
700
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union

import torch

from .tensor_utils import tensor_tree_map, tree_map


def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    """Recursively collect the ``.shape`` of every tensor in a nested dict/list/tuple tree.

    Raises:
        ValueError: if a leaf is not a tensor or a supported container.
    """
    shapes: List[Tuple[int, ...]] = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")
    return shapes


@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    """Convert a flat index into a multi-dimensional index for a tensor of shape ``dims``."""
    idx: List[int] = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d
    return tuple(reversed(idx))


@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    """Return the minimal set of slice tuples covering the (inclusive) index range
    ``start``..``end`` of a tensor with batch shape ``dims``.

    ``start_edges`` / ``end_edges`` both indicate whether, starting from any given
    dimension, the start/end index is at the top/bottom edge of the corresponding
    tensor, modeled as a tree.
    """

    def reduce_edge_list(l: List[bool]) -> None:
        # An index is only "on the edge" if all indices after it are as well.
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced.
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []

    # Dimensions common to start and end can be selected directly.
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done.
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once.
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case.
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time.
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk.
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices


@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    """Slice the half-open flat range [flat_start, flat_end) out of the first
    ``no_batch_dims`` dims of ``t`` without materializing a flattened copy."""
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive, hence flat_end - 1.
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform.
    slices = _get_minimal_slice_set(
        start_idx,
        end_idx,
        batch_dims,
    )

    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])


def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    """Run ``layer`` over ``inputs`` in chunks of ``chunk_size`` along the flattened
    leading ``no_batch_dims`` batch dimensions, reassembling the output.

    Args:
        layer: callable invoked as ``layer(**chunk_of_inputs)``.
        inputs: (possibly nested) dict of tensors; leading batch dims are broadcast
            to a common shape before chunking.
        chunk_size: number of flattened batch elements per call.
        no_batch_dims: how many leading dims form the batch.
        low_mem: if True, avoid materializing flattened copies of the inputs
            (slower, but smaller peak memory).
        _out: optional pre-allocated output tree to write into.
        _add_into_out: if True, accumulate (+=) into the output instead of assigning.

    Raises:
        ValueError: if ``inputs`` is empty or an output leaf type is unsupported.
    """
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        # Size-1 leading dims are broadcast, not sliced.
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input.
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk.
        output_chunk = layer(**chunks)

        # Allocate space for the output.
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space.
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)
    return out


class ChunkSizeTuner:
    """Binary-searches the largest chunk size that runs without a RuntimeError
    (e.g. OOM) for a representative call, caching the result per argument shape."""

    def __init__(
        self,
        # Heuristic cap on the search; the tuner never proposes more than this.
        max_chunk_size: int = 512,
    ):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                # Compare dict values in a deterministic (key-sorted) order.
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent

    def tune_chunk_size(
        self,
        representative_fn: Callable,
        args: tuple,
        min_chunk_size: int,
    ) -> int:
        consistent = True
        # Tensors are reduced to their shapes; everything else is compared by value.
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune.
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Nothing cached yet, so we must tune.
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
171
0
"""Tokenization classes for MGP-STR: a character-level tokenizer backed by a JSON vocab."""
import json
import os
from typing import Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    """Character-level tokenizer: each character of the input string becomes one token,
    looked up in a JSON vocabulary file.

    Args:
        vocab_file: path to the JSON vocab mapping characters to ids.
        unk_token / bos_token / eos_token / pad_token: special-token strings
            (all default to "[GO]" except eos_token, which defaults to "[s]").
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )
        # Load char -> id mapping, and build the inverse id -> char mapping.
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        """Number of entries in the base vocabulary (excluding added tokens)."""
        return len(self.vocab)

    def get_vocab(self):
        """Return the full vocab, including tokens added after loading."""
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Split ``text`` into individual characters."""
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        """Map a character to its id, falling back to the unk token's id."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Map an id back to its character (None if unknown)."""
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the vocab JSON into ``save_directory`` and return the file path.

        Logs an error and returns None if ``save_directory`` is not a directory.
        """
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
18
"""GPTBigCode model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    """Configuration class for GPT-BigCode (SantaCoder-style) models.

    Stores the hyperparameters below as attributes; the ``attribute_map`` exposes
    them under the generic names used elsewhere in the library (``hidden_size``, ...).

    Args:
        vocab_size: size of the token vocabulary.
        n_positions: maximum sequence length.
        n_embd: hidden size.
        n_layer / n_head: transformer depth and attention head count.
        n_inner: inner MLP size (None means the model's default).
        activation_function: name of the MLP activation.
        resid_pdrop / embd_pdrop / attn_pdrop: dropout probabilities.
        layer_norm_epsilon: epsilon used by LayerNorm.
        initializer_range: stddev for weight init.
        scale_attn_weights: whether to scale attention weights.
        use_cache: whether to return past key/values for generation.
        attention_softmax_in_fp32 / scale_attention_softmax_in_fp32: numerical
            options for the attention softmax.
        multi_query: whether to use multi-query attention.
    """

    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        # Special-token ids are handled by the base class.
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
18
1
"""simple docstring""" import torch from torch import nn from transformers import CLIPPreTrainedModel, CLIPVisionModel from ...models.attention import BasicTransformerBlock from ...utils import logging A: Dict = logging.get_logger(__name__) # pylint: disable=invalid-name class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=768 ) -> str: '''simple docstring''' super().__init__(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : List[str] = proj_size UpperCAmelCase : List[Any] = CLIPVisionModel(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : int = PaintByExampleMapper(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Tuple = nn.LayerNorm(config.hidden_size ) UpperCAmelCase : Optional[Any] = nn.Linear(config.hidden_size , self.proj_size ) # uncondition for scaling UpperCAmelCase : Tuple = nn.Parameter(torch.randn((1, 1, self.proj_size) ) ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Dict: '''simple docstring''' UpperCAmelCase : List[Any] = self.model(pixel_values=_SCREAMING_SNAKE_CASE ) UpperCAmelCase : int = clip_output.pooler_output UpperCAmelCase : Union[str, Any] = self.mapper(latent_states[:, None] ) UpperCAmelCase : int = self.final_layer_norm(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Tuple = self.proj_out(_SCREAMING_SNAKE_CASE ) if return_uncond_vector: return latent_states, self.uncond_vector return latent_states class SCREAMING_SNAKE_CASE__ ( nn.Module ): def __init__( self , _SCREAMING_SNAKE_CASE ) -> int: '''simple docstring''' super().__init__() UpperCAmelCase : Any = (config.num_hidden_layers + 1) // 5 UpperCAmelCase : Any = config.hidden_size UpperCAmelCase : Optional[int] = 1 UpperCAmelCase : Optional[int] = nn.ModuleList( [ BasicTransformerBlock(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , activation_fn="""gelu""" , attention_bias=_SCREAMING_SNAKE_CASE ) for _ in range(_SCREAMING_SNAKE_CASE ) ] ) def SCREAMING_SNAKE_CASE ( self , 
_SCREAMING_SNAKE_CASE ) -> List[Any]: '''simple docstring''' for block in self.blocks: UpperCAmelCase : Any = block(_SCREAMING_SNAKE_CASE ) return hidden_states
359
"""simple docstring""" import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py A: Optional[Any] = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n" A: Optional[int] = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. 
This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n" A: Optional[int] = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... 
]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE__ ( datasets.Metric ): def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ), """references""": datasets.Sequence( datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ), } ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[ """https://en.wikipedia.org/wiki/BLEU""", """https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""", ] , ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=False ) -> Optional[int]: '''simple docstring''' UpperCAmelCase : Dict = compute_bleu( reference_corpus=_SCREAMING_SNAKE_CASE , translation_corpus=_SCREAMING_SNAKE_CASE , max_order=_SCREAMING_SNAKE_CASE , smooth=_SCREAMING_SNAKE_CASE ) ((UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase)) : Dict = score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
359
1
from manim import *


class Stage5(Scene):
    """Manim scene animating big-model inference: an input is pushed through a
    model whose weights are swapped between CPU and GPU layer by layer.

    NOTE(review): variable names and colors reconstructed from mangled source —
    colors (BLUE/RED/ORANGE) are assumptions to confirm against the original.
    """

    def construct(self):
        # Basic building blocks: a memory cell, its filled overlay, and a
        # smaller "meta" cell used for the disk.
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        meta_mem = Rectangle(height=0.25, width=0.25)

        # CPU: two columns of six cells with a label.
        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        # GPU: four cells with a label.
        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        # Model: a horizontal row of six cells with a label.
        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        # Filled overlays: one per model layer, mirrored by a CPU-resident copy.
        model_arr = []
        model_cpu_arr = []
        for i, rect in enumerate(model_base):
            target = fill.copy().set_fill(BLUE, opacity=0.8)
            target.move_to(rect)
            model_arr.append(target)

            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(BLUE, opacity=0.8)
            cpu_target.move_to(cpu_left_col_base[i])
            model_cpu_arr.append(cpu_target)

        self.add(*model_arr, *model_cpu_arr)

        # Disk: two columns of six small cells with a label.
        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4, -1.25, 0])
        self.add(disk_text, disk_rects)

        # Legend.
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_6 = MarkupText(
            f"Now watch as an input is passed through the model\nand how the memory is utilized and handled.",
            font_size=24,
        )
        step_6.move_to([2, 2, 0])
        self.play(Write(step_6))

        # The input token enters on the left of the model.
        input_box = Square(0.3)
        input_box.set_fill(RED, opacity=1.0)
        input_box.set_stroke(width=0.0)
        input_box.next_to(model_base[0], LEFT, buff=0.5)
        self.play(Write(input_box))

        input_box.generate_target()
        input_box.target.next_to(model_arr[0], direction=LEFT, buff=0.02)
        self.play(MoveToTarget(input_box))
        self.play(FadeOut(step_6))

        # Arrow marking the layer currently being executed.
        a = Arrow(start=UP, end=DOWN, color=RED, buff=0.5)
        a.next_to(model_arr[0].get_left(), UP, buff=0.2)
        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0])

        step_7 = MarkupText(
            f"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.",
            font_size=24,
        )
        step_7.move_to([2, 2, 0])
        self.play(Write(step_7, run_time=3))

        circ_kwargs = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}
        self.play(
            Write(a),
            Circumscribe(model_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(model_cpu_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
        )
        self.play(MoveToTarget(model_cpu_arr[0]))

        a_c = a.copy()
        for i in range(6):
            a_c.next_to(model_arr[i].get_right() + 0.02, UP, buff=0.2)

            input_box.generate_target()
            input_box.target.move_to(model_arr[i].get_right() + 0.02)

            grp = AnimationGroup(
                FadeOut(a, run_time=0.5), MoveToTarget(input_box, run_time=0.5), FadeIn(a_c, run_time=0.5), lag_ratio=0.2
            )
            self.play(grp)

            # The executed layer's weights return to the CPU...
            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i])

            if i < 5:
                # ...and the next layer's weights move onto the GPU.
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0])
                if i >= 1:
                    circ_kwargs["run_time"] = 0.7

                self.play(
                    Circumscribe(model_arr[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i + 1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                    Circumscribe(model_arr[i + 1], color=ORANGE, **circ_kwargs),
                )
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i]),
                        MoveToTarget(model_cpu_arr[i + 1]),
                    )
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i], run_time=0.7),
                        MoveToTarget(model_cpu_arr[i + 1], run_time=0.7),
                    )
            else:
                # Last layer: weights go back to the CPU and the input exits.
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1])
                input_box.generate_target()
                input_box.target.next_to(model_arr[-1].get_right(), RIGHT + 0.02, buff=0.2)

                self.play(
                    Circumscribe(model_arr[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(cpu_left_col_base[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                )

                self.play(MoveToTarget(model_cpu_arr[i]))

            a = a_c
            a_c = a_c.copy()

        input_box.generate_target()
        input_box.target.next_to(model_base[-1], RIGHT + 0.02, buff=0.5)
        self.play(
            FadeOut(step_7),
            FadeOut(a_c, run_time=0.5),
        )

        step_8 = MarkupText(f"Inference on a model too large for GPU memory\nis successfully completed.", font_size=24)
        step_8.move_to([2, 2, 0])

        self.play(Write(step_8, run_time=3), MoveToTarget(input_box))
        self.wait()
647
import os
import tempfile
import unittest

from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        NezhaForMaskedLM,
        NezhaForMultipleChoice,
        NezhaForNextSentencePrediction,
        NezhaForPreTraining,
        NezhaForQuestionAnswering,
        NezhaForSequenceClassification,
        NezhaForTokenClassification,
        NezhaModel,
    )
    from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST


class NezhaModelTester:
    """Builds small Nezha configs/inputs and runs per-head shape checks for the test suite."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=128,
        max_relative_position=32,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # NOTE: max_relative_position is accepted for signature parity with the
        # config but is not used directly by these tests.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random ids/masks/labels plus a config for one forward pass."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """Return a small NezhaConfig suitable for fast unit tests."""
        return NezhaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        """Like prepare_config_and_inputs, with encoder states for decoder testing."""
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        # The second call exercises the default (all-ones) encoder attention mask.
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Tile each input across the choice dimension: (batch, seq) -> (batch, num_choices, seq).
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite tests for all Nezha model heads."""

    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Add dummy labels for pretraining heads when the suite requests labels."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # Exercises the default attention mask path in decoder mode.
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against the released sijunhe/nezha-cn-base checkpoint."""

    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
647
1
from __future__ import annotations


def peak(lst: list[int]) -> int:
    """Return a peak value of ``lst`` by divide and conquer.

    A peak is an element strictly greater than both of its neighbours. The
    input is assumed to be a "mountain-like" list of at least three elements
    in which such a peak exists; the middle three elements are inspected and
    the search recurses into the rising half.

    Bug fixed: the function was previously defined under a different name
    while its recursive calls still referenced ``peak``, so any input that
    required recursion raised ``NameError``.
    """
    m = len(lst) // 2
    # The three candidate elements around the midpoint.
    three = lst[m - 1 : m + 2]
    if three[1] > three[0] and three[1] > three[2]:
        # Middle element is a peak.
        return three[1]
    if three[0] < three[2]:
        # Still increasing: the peak lies in the right half.
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])
    # Decreasing: the peak lies in the left half.
    if len(lst[:m]) == 2:
        m += 1
    return peak(lst[:m])


# Backward-compatible alias for the previous public name of this function.
__SCREAMING_SNAKE_CASE = peak

if __name__ == "__main__":
    import doctest

    doctest.testmod()
375
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple

import torch
from audiocraft.models import MusicGen

from transformers import (
    AutoFeatureExtractor,
    AutoTokenizer,
    EncodecModel,
    MusicgenDecoderConfig,
    MusicgenForConditionalGeneration,
    MusicgenProcessor,
    TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Decoder keys that are legitimately absent after loading the fairseq weights.
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]


def rename_keys(name):
    """Map a fairseq MusicGen decoder parameter name to its Transformers equivalent."""
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name


def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    """Rename a fairseq MusicGen state dict to Transformers conventions.

    The fused QKV projection is split into separate q/k/v projections, and the
    encoder->decoder projection weights are separated out, since they belong to
    the composite ``MusicgenForConditionalGeneration`` model rather than the
    decoder itself.
    """
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict


def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    """Return the decoder config matching the given MusicGen checkpoint size.

    Raises:
        ValueError: if ``checkpoint`` is not one of ``small``/``medium``/``large``.
    """
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config


@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    """Convert a fairseq MusicGen checkpoint into a Transformers model + processor.

    Optionally saves the converted artifacts to ``pytorch_dump_folder`` and/or
    pushes them to the Hub repo ``repo_id``.
    """
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = TaEncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint",
        default="small",
        type=str,
        help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
    )
    parser.add_argument(
        "--pytorch_dump_folder",
        required=True,
        default=None,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    parser.add_argument(
        "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
    )

    args = parser.parse_args()
    # Bug fixed: the --device argument was parsed but never forwarded.
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device)
375
1
from collections import deque class _UpperCamelCase: def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' __a : int = process_name # process name __a : Dict = arrival_time # arrival time of the process # completion time of finished process or last interrupted time __a : int = arrival_time __a : Any = burst_time # remaining burst time __a : List[str] = 0 # total time of the process wait in ready queue __a : Union[str, Any] = 0 # time from arrival time to completion time class _UpperCamelCase: def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : deque[Process] , SCREAMING_SNAKE_CASE__ : int , ): '''simple docstring''' __a : Dict = number_of_queues # time slice of queues that round robin algorithm applied __a : int = time_slices # unfinished process is in this ready_queue __a : List[Any] = queue # current time __a : Dict = current_time # finished process is in this sequence queue __a : deque[Process] = deque() def __lowerCAmelCase ( self : List[str] ): '''simple docstring''' __a : List[Any] = [] for i in range(len(self.finish_queue ) ): sequence.append(self.finish_queue[i].process_name ) return sequence def __lowerCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : list[Process] ): '''simple docstring''' __a : str = [] for i in range(len(SCREAMING_SNAKE_CASE__ ) ): waiting_times.append(queue[i].waiting_time ) return waiting_times def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : list[Process] ): '''simple docstring''' __a : List[str] = [] for i in range(len(SCREAMING_SNAKE_CASE__ ) ): turnaround_times.append(queue[i].turnaround_time ) return turnaround_times def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : list[Process] ): '''simple docstring''' __a : Tuple = [] for i in range(len(SCREAMING_SNAKE_CASE__ ) ): completion_times.append(queue[i].stop_time ) return 
completion_times def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : deque[Process] ): '''simple docstring''' return [q.burst_time for q in queue] def __lowerCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Process ): '''simple docstring''' process.waiting_time += self.current_time - process.stop_time return process.waiting_time def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : deque[Process] ): '''simple docstring''' __a : deque[Process] = deque() # sequence deque of finished process while len(SCREAMING_SNAKE_CASE__ ) != 0: __a : Any = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of current process self.update_waiting_time(SCREAMING_SNAKE_CASE__ ) # update current time self.current_time += cp.burst_time # finish the process and set the process's burst-time 0 __a : str = 0 # set the process's turnaround time because it is finished __a : str = self.current_time - cp.arrival_time # set the completion time __a : Optional[Any] = self.current_time # add the process to queue that has finished queue finished.append(SCREAMING_SNAKE_CASE__ ) self.finish_queue.extend(SCREAMING_SNAKE_CASE__ ) # add finished process to finish queue # FCFS will finish all remaining processes return finished def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : deque[Process] , SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' __a : deque[Process] = deque() # sequence deque of terminated process # just for 1 cycle and unfinished processes will go back to queue for _ in range(len(SCREAMING_SNAKE_CASE__ ) ): __a : str = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of unfinished processes 
self.update_waiting_time(SCREAMING_SNAKE_CASE__ ) # if the burst time of process is bigger than time-slice if cp.burst_time > time_slice: # use CPU for only time-slice self.current_time += time_slice # update remaining burst time cp.burst_time -= time_slice # update end point time __a : Union[str, Any] = self.current_time # locate the process behind the queue because it is not finished ready_queue.append(SCREAMING_SNAKE_CASE__ ) else: # use CPU for remaining burst time self.current_time += cp.burst_time # set burst time 0 because the process is finished __a : Optional[int] = 0 # set the finish time __a : Dict = self.current_time # update the process' turnaround time because it is finished __a : Tuple = self.current_time - cp.arrival_time # add the process to queue that has finished queue finished.append(SCREAMING_SNAKE_CASE__ ) self.finish_queue.extend(SCREAMING_SNAKE_CASE__ ) # add finished process to finish queue # return finished processes queue and remaining processes queue return finished, ready_queue def __lowerCAmelCase ( self : Tuple ): '''simple docstring''' for i in range(self.number_of_queues - 1 ): __a , __a : Union[str, Any] = self.round_robin( self.ready_queue , self.time_slices[i] ) # the last queue has first_come_first_served algorithm self.first_come_first_served(self.ready_queue ) return self.finish_queue if __name__ == "__main__": import doctest SCREAMING_SNAKE_CASE__ = Process('''P1''', 0, 53) SCREAMING_SNAKE_CASE__ = Process('''P2''', 0, 17) SCREAMING_SNAKE_CASE__ = Process('''P3''', 0, 68) SCREAMING_SNAKE_CASE__ = Process('''P4''', 0, 24) SCREAMING_SNAKE_CASE__ = 3 SCREAMING_SNAKE_CASE__ = [17, 25] SCREAMING_SNAKE_CASE__ = deque([Pa, Pa, Pa, Pa]) if len(time_slices) != number_of_queues - 1: raise SystemExit(0) doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])}) SCREAMING_SNAKE_CASE__ = Process('''P1''', 0, 53) SCREAMING_SNAKE_CASE__ = Process('''P2''', 0, 17) SCREAMING_SNAKE_CASE__ = Process('''P3''', 0, 68) 
SCREAMING_SNAKE_CASE__ = Process('''P4''', 0, 24) SCREAMING_SNAKE_CASE__ = 3 SCREAMING_SNAKE_CASE__ = [17, 25] SCREAMING_SNAKE_CASE__ = deque([Pa, Pa, Pa, Pa]) SCREAMING_SNAKE_CASE__ = MLFQ(number_of_queues, time_slices, queue, 0) SCREAMING_SNAKE_CASE__ = mlfq.multi_level_feedback_queue() # print total waiting times of processes(P1, P2, P3, P4) print( F"waiting time:\ \t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}" ) # print completion times of processes(P1, P2, P3, P4) print( F"completion time:\ \t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}" ) # print total turnaround times of processes(P1, P2, P3, P4) print( F"turnaround time:\ \t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}" ) # print sequence of finished processes print( F"sequence of finished processes:\ {mlfq.calculate_sequence_of_finish_queue()}" )
47
'''simple docstring''' import argparse import os import torch from transformers import ( XLNetConfig, XLNetForQuestionAnswering, XLNetForSequenceClassification, XLNetLMHeadModel, load_tf_weights_in_xlnet, ) from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging lowercase__ ={ 'cola': 2, 'mnli': 3, 'mrpc': 2, 'sst-2': 2, 'sts-b': 1, 'qqp': 2, 'qnli': 2, 'rte': 2, 'wnli': 2, } logging.set_verbosity_info() def UpperCamelCase_ ( A__ , A__ , A__ , A__=None ): # Initialise PyTorch model a_ = XLNetConfig.from_json_file(A__ ) a_ = finetuning_task.lower() if finetuning_task is not None else """""" if finetuning_task in GLUE_TASKS_NUM_LABELS: print(F'''Building PyTorch XLNetForSequenceClassification model from configuration: {config}''' ) a_ = finetuning_task a_ = GLUE_TASKS_NUM_LABELS[finetuning_task] a_ = XLNetForSequenceClassification(A__ ) elif "squad" in finetuning_task: a_ = finetuning_task a_ = XLNetForQuestionAnswering(A__ ) else: a_ = XLNetLMHeadModel(A__ ) # Load weights from tf checkpoint load_tf_weights_in_xlnet(A__ , A__ , A__ ) # Save pytorch-model a_ = os.path.join(A__ , A__ ) a_ = os.path.join(A__ , A__ ) print(F'''Save PyTorch model to {os.path.abspath(A__ )}''' ) torch.save(model.state_dict() , A__ ) print(F'''Save configuration file to {os.path.abspath(A__ )}''' ) with open(A__ , """w""" , encoding="""utf-8""" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": lowercase__ =argparse.ArgumentParser() # Required parameters parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--xlnet_config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained XLNet model. \n' 'This specifies the model architecture.' 
), ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the folder to store the PyTorch model or dataset/vocab.', ) parser.add_argument( '--finetuning_task', default=None, type=str, help='Name of a task on which the XLNet TensorFlow model was fine-tuned', ) lowercase__ =parser.parse_args() print(args) convert_xlnet_checkpoint_to_pytorch( args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task )
263
0
'''simple docstring''' from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_torch_available from ...utils import OptionalDependencyNotAvailable a__ : str ={ '''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''], '''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Tuple =[ '''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GPTNeoXJapaneseForCausalLM''', '''GPTNeoXJapaneseLayer''', '''GPTNeoXJapaneseModel''', '''GPTNeoXJapanesePreTrainedModel''', ] if TYPE_CHECKING: from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neox_japanese import ( GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseLayer, GPTNeoXJapaneseModel, GPTNeoXJapanesePreTrainedModel, ) else: import sys a__ : Union[str, Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
434
'''simple docstring''' import inspect import unittest class snake_case ( unittest.TestCase ): """simple docstring""" def _lowerCamelCase ( self : Optional[Any] ): try: import diffusers # noqa: F401 except ImportError: assert False def _lowerCamelCase ( self : Tuple ): import diffusers from diffusers.dependency_versions_table import deps __UpperCamelCase = inspect.getmembers(__A , inspect.isclass ) for cls_name, cls_module in all_classes: if "dummy_" in cls_module.__module__: for backend in cls_module._backends: if backend == "k_diffusion": __UpperCamelCase = 'k-diffusion' elif backend == "invisible_watermark": __UpperCamelCase = 'invisible-watermark' assert backend in deps, f'''{backend} is not in the deps table!'''
434
1
'''simple docstring''' from __future__ import annotations from collections import namedtuple def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' A : Optional[Any] = namedtuple('''result''' , '''name value''' ) if (voltage, current, power).count(0 ) != 1: raise ValueError('''Only one argument must be 0''' ) elif power < 0: raise ValueError( '''Power cannot be negative in any electrical/electronics system''' ) elif voltage == 0: return result('''voltage''' , power / current ) elif current == 0: return result('''current''' , power / voltage ) elif power == 0: return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) ) else: raise ValueError('''Exactly one argument must be 0''' ) if __name__ == "__main__": import doctest doctest.testmod()
634
"""simple docstring""" import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import MaskaFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel if is_vision_available(): from transformers import MaskaFormerImageProcessor if is_vision_available(): from PIL import Image class lowercase__ : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=32 * 8 , SCREAMING_SNAKE_CASE=32 * 8 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=64 , ) -> Optional[int]: _lowerCamelCase : List[str] = parent _lowerCamelCase : List[Any] = batch_size _lowerCamelCase : Tuple = is_training _lowerCamelCase : Tuple = use_auxiliary_loss _lowerCamelCase : Any = num_queries _lowerCamelCase : List[str] = num_channels _lowerCamelCase : List[str] = min_size _lowerCamelCase : Tuple = max_size _lowerCamelCase : str = num_labels _lowerCamelCase : Any = hidden_dim _lowerCamelCase : Dict = hidden_dim def UpperCamelCase_ ( self) -> List[str]: _lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to( SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=SCREAMING_SNAKE_CASE) > 0.5 ).float() 
_lowerCamelCase : Dict = (torch.rand((self.batch_size, self.num_labels) , device=SCREAMING_SNAKE_CASE) > 0.5).long() _lowerCamelCase : Optional[int] = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def UpperCamelCase_ ( self) -> str: _lowerCamelCase : List[str] = MaskaFormerConfig( hidden_size=self.hidden_dim , ) _lowerCamelCase : Any = self.num_queries _lowerCamelCase : int = self.num_labels _lowerCamelCase : int = [1, 1, 1, 1] _lowerCamelCase : Any = self.num_channels _lowerCamelCase : Optional[Any] = 64 _lowerCamelCase : str = 128 _lowerCamelCase : Optional[Any] = self.hidden_dim _lowerCamelCase : Any = self.hidden_dim _lowerCamelCase : List[Any] = self.hidden_dim return config def UpperCamelCase_ ( self) -> Any: _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[str] = self.prepare_config_and_inputs() _lowerCamelCase : str = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask} return config, inputs_dict def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> Optional[int]: _lowerCamelCase : str = output.encoder_hidden_states _lowerCamelCase : int = output.pixel_decoder_hidden_states _lowerCamelCase : Optional[int] = output.transformer_decoder_hidden_states self.parent.assertTrue(len(SCREAMING_SNAKE_CASE) , len(config.backbone_config.depths)) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE) , len(config.backbone_config.depths)) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE) , config.decoder_layers) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False) -> List[str]: with torch.no_grad(): _lowerCamelCase : Optional[int] = MaskaFormerModel(config=SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.eval() _lowerCamelCase : Optional[int] = model(pixel_values=SCREAMING_SNAKE_CASE , pixel_mask=SCREAMING_SNAKE_CASE) _lowerCamelCase : List[str] = model(SCREAMING_SNAKE_CASE , 
output_hidden_states=SCREAMING_SNAKE_CASE) self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None) self.parent.assertTrue(output.encoder_last_hidden_state is not None) if output_hidden_states: self.check_output_hidden_state(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> str: _lowerCamelCase : str = MaskaFormerForUniversalSegmentation(config=SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.eval() def comm_check_on_output(SCREAMING_SNAKE_CASE): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None) self.parent.assertTrue(result.encoder_last_hidden_state is not None) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1)) with torch.no_grad(): _lowerCamelCase : List[Any] = model(pixel_values=SCREAMING_SNAKE_CASE , pixel_mask=SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE) comm_check_on_output(SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[Any] = model( pixel_values=SCREAMING_SNAKE_CASE , pixel_mask=SCREAMING_SNAKE_CASE , mask_labels=SCREAMING_SNAKE_CASE , class_labels=SCREAMING_SNAKE_CASE) comm_check_on_output(SCREAMING_SNAKE_CASE) self.parent.assertTrue(result.loss is not None) 
self.parent.assertEqual(result.loss.shape , torch.Size([1])) @require_torch class lowercase__ ( A_ ,A_ ,unittest.TestCase ): __UpperCAmelCase = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else () __UpperCAmelCase = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {} __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = False def UpperCamelCase_ ( self) -> Dict: _lowerCamelCase : Optional[int] = MaskaFormerModelTester(self) _lowerCamelCase : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> List[str]: self.config_tester.run_common_tests() def UpperCamelCase_ ( self) -> int: _lowerCamelCase , _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Tuple: _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*SCREAMING_SNAKE_CASE) @unittest.skip(reason="""Mask2Former does not use inputs_embeds""") def UpperCamelCase_ ( self) -> Optional[int]: pass @unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""") def UpperCamelCase_ ( self) -> Tuple: pass @unittest.skip(reason="""Mask2Former is not a generative model""") def UpperCamelCase_ ( self) -> List[Any]: pass @unittest.skip(reason="""Mask2Former does not use token embeddings""") def UpperCamelCase_ ( self) -> Any: pass @require_torch_multi_gpu @unittest.skip( reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""") def UpperCamelCase_ ( self) -> Dict: pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""") 
def UpperCamelCase_ ( self) -> Optional[int]: pass def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase , _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : Dict = model_class(SCREAMING_SNAKE_CASE) _lowerCamelCase : Any = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCamelCase : str = [*signature.parameters.keys()] _lowerCamelCase : int = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE) @slow def UpperCamelCase_ ( self) -> Optional[int]: for model_name in ["facebook/mask2former-swin-small-coco-instance"]: _lowerCamelCase : Optional[int] = MaskaFormerModel.from_pretrained(SCREAMING_SNAKE_CASE) self.assertIsNotNone(SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : Dict = (self.model_tester.min_size,) * 2 _lowerCamelCase : str = { """pixel_values""": torch.randn((2, 3, *size) , device=SCREAMING_SNAKE_CASE), """mask_labels""": torch.randn((2, 10, *size) , device=SCREAMING_SNAKE_CASE), """class_labels""": torch.zeros(2 , 10 , device=SCREAMING_SNAKE_CASE).long(), } _lowerCamelCase : List[str] = self.model_tester.get_config() _lowerCamelCase : Tuple = MaskaFormerForUniversalSegmentation(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = model(**SCREAMING_SNAKE_CASE) self.assertTrue(outputs.loss is not None) def UpperCamelCase_ ( self) -> Tuple: _lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Optional[int]: _lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase 
: str = model_class(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = model(**SCREAMING_SNAKE_CASE , output_attentions=SCREAMING_SNAKE_CASE) self.assertTrue(outputs.attentions is not None) def UpperCamelCase_ ( self) -> Optional[Any]: if not self.model_tester.is_training: return _lowerCamelCase : Any = self.all_model_classes[1] _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() _lowerCamelCase : List[Any] = model_class(SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.train() _lowerCamelCase : int = model(SCREAMING_SNAKE_CASE , mask_labels=SCREAMING_SNAKE_CASE , class_labels=SCREAMING_SNAKE_CASE).loss loss.backward() def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : Any = self.all_model_classes[1] _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() _lowerCamelCase : int = True _lowerCamelCase : Optional[Any] = True _lowerCamelCase : str = model_class(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE) model.train() _lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE , mask_labels=SCREAMING_SNAKE_CASE , class_labels=SCREAMING_SNAKE_CASE) _lowerCamelCase : Tuple = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _lowerCamelCase : int = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() _lowerCamelCase : str = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() _lowerCamelCase : Optional[int] = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=SCREAMING_SNAKE_CASE) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(pixel_decoder_hidden_states.grad) self.assertIsNotNone(transformer_decoder_hidden_states.grad) self.assertIsNotNone(attentions.grad) UpperCAmelCase = 1e-4 def _snake_case ( 
): """simple docstring""" _lowerCamelCase : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_vision @slow class lowercase__ ( unittest.TestCase ): @cached_property def UpperCamelCase_ ( self) -> int: return "facebook/mask2former-swin-small-coco-instance" @cached_property def UpperCamelCase_ ( self) -> Union[str, Any]: return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : Tuple = MaskaFormerModel.from_pretrained(self.model_checkpoints).to(SCREAMING_SNAKE_CASE) _lowerCamelCase : str = self.default_image_processor _lowerCamelCase : List[str] = prepare_img() _lowerCamelCase : Union[str, Any] = image_processor(SCREAMING_SNAKE_CASE , return_tensors="""pt""").to(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0) # check size self.assertEqual(SCREAMING_SNAKE_CASE , (1, 3, 384, 384)) with torch.no_grad(): _lowerCamelCase : Dict = model(**SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = torch.tensor( [[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]]).to(SCREAMING_SNAKE_CASE) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE)) _lowerCamelCase : Any = torch.tensor( [[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]]).to(SCREAMING_SNAKE_CASE) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE)) _lowerCamelCase : Dict = torch.tensor( [[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]]).to(SCREAMING_SNAKE_CASE) self.assertTrue( torch.allclose( 
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE)) def UpperCamelCase_ ( self) -> Any: _lowerCamelCase : Optional[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(SCREAMING_SNAKE_CASE).eval() _lowerCamelCase : Optional[Any] = self.default_image_processor _lowerCamelCase : Any = prepare_img() _lowerCamelCase : Dict = image_processor(SCREAMING_SNAKE_CASE , return_tensors="""pt""").to(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0) # check size self.assertEqual(SCREAMING_SNAKE_CASE , (1, 3, 384, 384)) with torch.no_grad(): _lowerCamelCase : List[str] = model(**SCREAMING_SNAKE_CASE) # masks_queries_logits _lowerCamelCase : str = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)) _lowerCamelCase : Any = [ [-8.78_39, -9.00_56, -8.81_21], [-7.41_04, -7.03_13, -6.54_01], [-6.61_05, -6.34_27, -6.46_75], ] _lowerCamelCase : List[Any] = torch.tensor(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE)) # class_queries_logits _lowerCamelCase : List[str] = outputs.class_queries_logits self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1)) _lowerCamelCase : Optional[Any] = torch.tensor( [ [1.83_24, -8.08_35, -4.19_22], [0.84_50, -9.00_50, -3.60_53], [0.30_45, -7.72_93, -3.02_75], ]).to(SCREAMING_SNAKE_CASE) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE)) def UpperCamelCase_ ( self) -> int: _lowerCamelCase : Tuple = 
MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(SCREAMING_SNAKE_CASE).eval() _lowerCamelCase : str = self.default_image_processor _lowerCamelCase : Tuple = image_processor( [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))] , segmentation_maps=[np.zeros((384, 384)).astype(np.floataa), np.zeros((384, 384)).astype(np.floataa)] , return_tensors="""pt""" , ) _lowerCamelCase : Optional[Any] = inputs["""pixel_values"""].to(SCREAMING_SNAKE_CASE) _lowerCamelCase : Any = [el.to(SCREAMING_SNAKE_CASE) for el in inputs["""mask_labels"""]] _lowerCamelCase : Union[str, Any] = [el.to(SCREAMING_SNAKE_CASE) for el in inputs["""class_labels"""]] with torch.no_grad(): _lowerCamelCase : Any = model(**SCREAMING_SNAKE_CASE) self.assertTrue(outputs.loss is not None)
88
0
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import GLPNImageProcessor class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __init__( self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=3 , lowerCamelCase=18 , lowerCamelCase=30 , lowerCamelCase=4_00 , lowerCamelCase=True , lowerCamelCase=32 , lowerCamelCase=True , ): snake_case__ = parent snake_case__ = batch_size snake_case__ = num_channels snake_case__ = image_size snake_case__ = min_resolution snake_case__ = max_resolution snake_case__ = do_resize snake_case__ = size_divisor snake_case__ = do_rescale def A_ ( self ): return { "do_resize": self.do_resize, "size_divisor": self.size_divisor, "do_rescale": self.do_rescale, } @require_torch @require_vision class _SCREAMING_SNAKE_CASE ( __UpperCamelCase , unittest.TestCase ): _A : Optional[Any] = GLPNImageProcessor if is_vision_available() else None def A_ ( self ): snake_case__ = GLPNImageProcessingTester(self ) @property def A_ ( self ): return self.image_processor_tester.prepare_image_processor_dict() def A_ ( self ): snake_case__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase , "do_resize" ) ) self.assertTrue(hasattr(lowerCamelCase , "size_divisor" ) ) self.assertTrue(hasattr(lowerCamelCase , "resample" ) ) self.assertTrue(hasattr(lowerCamelCase , "do_rescale" ) ) def A_ ( self ): pass def A_ ( self ): # Initialize image_processing snake_case__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase ) for image in image_inputs: 
self.assertIsInstance(lowerCamelCase , Image.Image ) # Test not batched input (GLPNImageProcessor doesn't support batching) snake_case__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 ) def A_ ( self ): # Initialize image_processing snake_case__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase , np.ndarray ) # Test not batched input (GLPNImageProcessor doesn't support batching) snake_case__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 ) def A_ ( self ): # Initialize image_processing snake_case__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase , torch.Tensor ) # Test not batched input (GLPNImageProcessor doesn't support batching) snake_case__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
700
from __future__ import annotations


def binary_search(a_list: list[int], item: int) -> bool:
    """Return True if ``item`` occurs in the sorted list ``a_list``.

    The original defined this function under an obfuscated name while the
    ``__main__`` section called ``binary_search`` (NameError). It also
    recursed on list slices, copying O(n) elements per level; this version
    searches iteratively over index bounds instead.
    """
    low, high = 0, len(a_list)
    while low < high:
        midpoint = (low + high) // 2
        if a_list[midpoint] == item:
            return True
        if item < a_list[midpoint]:
            high = midpoint  # search left half
        else:
            low = midpoint + 1  # search right half
    return False


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(f"{target} was {not_str}found in {sequence}")
530
0
import argparse
from collections import OrderedDict
from pathlib import Path

import requests
import torch
from PIL import Image

from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def rename_keys(state_dict):
    """Map original GLPN checkpoint keys onto the HuggingFace naming scheme.

    Returns a new OrderedDict; the input mapping is not modified.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value

    return new_state_dict


def read_in_k_v(state_dict, config):
    """Split each fused key/value projection of the checkpoint in place.

    The original implementation stores keys and values as a single `kv`
    matrix per attention layer; HF expects separate `key` and `value` weights.
    """
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]


def prepare_img():
    """Download the standard COCO cats test image used to verify conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    """Convert an original GLPN checkpoint to HF format and optionally push it.

    NOTE(review): the obfuscated source defined all helpers under one name
    (`snake_case`) while calling them by their real names; names restored here.
    """
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path",
        default=None,
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )
    parser.add_argument(
        "--model_name",
        default="glpn-kitti",
        type=str,
        help="Name of the model in case you're pushing to the hub.",
    )
    args = parser.parse_args()
    convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
401
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# NOTE(review): the path was assigned to an obfuscated name while
# `git_repo_path` was referenced below (NameError at collection time).
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    """pytest hook: register transformers' shared CLI options (e.g. --make-reports)."""
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    """pytest hook: write the extended test reports when --make-reports is passed."""
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
401
1
def UpperCAmelCase_(n, k):
    """Return the k-th (0-based, lexicographic) permutation of ``range(n)``.

    Uses the factorial number system: each factorial place value selects one
    remaining element. Raises AssertionError when ``k`` is out of bounds.

    NOTE(review): the original signature declared the same obfuscated name for
    both parameters, which is a SyntaxError in Python; parameters restored.
    """
    # Place values (n-1)!, ..., 2!, 1! of the factorial number system.
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    if n < 2:
        # For n < 2 there are no factorial digits to consume; the original
        # code left a spurious [1] here and crashed for n == 1.
        factorials = []

    # n! permutations in total (1 for n <= 1: the empty/identity permutation).
    total = factorials[-1] * n if factorials else 1
    assert 0 <= k < total, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation: peel off one factorial digit per remaining element.
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements.pop(number))
    if elements:
        permutation.append(elements[0])
    return permutation


if __name__ == "__main__":
    import doctest

    doctest.testmod()
650
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image


# Map interpolation-mode names to PIL resampling constants; PIL >= 9.1.0 moved
# them under the Resampling enum.
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    a = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    a = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }


def UpperCAmelCase_(images):
    """Convert a batched torch tensor in [-1, 1] (N, C, H, W) to PIL images.

    NOTE(review): the original defined two functions under this same name, so
    the second shadowed the first and the call to ``numpy_to_pil`` below was
    unresolved; the numpy helper is restored under its referenced name.
    """
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image batch (N, H, W, C) with values in [0, 1] to PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]

    return pil_images
650
1
import re
from pathlib import Path
from unittest import TestCase

import pytest


@pytest.mark.integration
class SCREAMING_SNAKE_CASE__(TestCase):
    """Repo-hygiene checks over ./datasets scripts: utf-8 `open()` and no `print`.

    NOTE(review): the base class and the helper-method names were destroyed by
    obfuscation (all four methods shared one mangled name while the tests
    called `_no_encoding_on_file_open` / `_no_print_statements`); restored.
    """

    def _no_encoding_on_file_open(self, filepath: str):
        r"""Return a match when the file calls text-mode open() without encoding=."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        r"""Return a match when the file contains a real (non-commented) print call."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
421
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# NOTE(review): the obfuscated original overwrote a single name for every
# structure assignment and then referenced the undefined `_import_structure`;
# the standard lazy-module structure is restored here.
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rembert"] = [
        "REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RemBertForCausalLM",
        "RemBertForMaskedLM",
        "RemBertForMultipleChoice",
        "RemBertForQuestionAnswering",
        "RemBertForSequenceClassification",
        "RemBertForTokenClassification",
        "RemBertLayer",
        "RemBertModel",
        "RemBertPreTrainedModel",
        "load_tf_weights_in_rembert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rembert"] = [
        "TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRemBertForCausalLM",
        "TFRemBertForMaskedLM",
        "TFRemBertForMultipleChoice",
        "TFRemBertForQuestionAnswering",
        "TFRemBertForSequenceClassification",
        "TFRemBertForTokenClassification",
        "TFRemBertLayer",
        "TFRemBertModel",
        "TFRemBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert import RemBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert_fast import RemBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rembert import (
            REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RemBertForCausalLM,
            RemBertForMaskedLM,
            RemBertForMultipleChoice,
            RemBertForQuestionAnswering,
            RemBertForSequenceClassification,
            RemBertForTokenClassification,
            RemBertLayer,
            RemBertModel,
            RemBertPreTrainedModel,
            load_tf_weights_in_rembert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rembert import (
            TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRemBertForCausalLM,
            TFRemBertForMaskedLM,
            TFRemBertForMultipleChoice,
            TFRemBertForQuestionAnswering,
            TFRemBertForSequenceClassification,
            TFRemBertForTokenClassification,
            TFRemBertLayer,
            TFRemBertModel,
            TFRemBertPreTrainedModel,
        )

else:
    import sys

    # Install the lazy module so attribute access triggers the real imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
421
1
import os
import tempfile
import unittest

from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property


@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class UpperCAmelCase_(unittest.TestCase):
    """Smoke tests for the Tatoeba -> PyTorch converter.

    NOTE(review): the skip condition referenced an undefined name and the
    cached property was not named `resolver` although the tests access
    ``self.resolver``; names restored.
    """

    @cached_property
    def resolver(self):
        # Converted models are written to a throw-away directory.
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
705
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# NOTE(review): the structure dict/list were bound to an obfuscated name while
# `_import_structure` was referenced below, and the TYPE_CHECKING import used
# the non-existent `NllbMoeTopaRouter` instead of the declared
# `NllbMoeTop2Router`; both restored.
_import_structure = {
    "configuration_nllb_moe": [
        "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "NllbMoeConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nllb_moe"] = [
        "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NllbMoeForConditionalGeneration",
        "NllbMoeModel",
        "NllbMoePreTrainedModel",
        "NllbMoeTop2Router",
        "NllbMoeSparseMLP",
    ]


if TYPE_CHECKING:
    from .configuration_nllb_moe import (
        NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
        NllbMoeConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTop2Router,
        )

else:
    import sys

    # Install the lazy module so attribute access triggers the real imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
114
0
import inspect
import unittest

from datasets import load_dataset
from packaging import version

from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        MODEL_MAPPING,
        BeitForImageClassification,
        BeitForMaskedImageModeling,
        BeitForSemanticSegmentation,
        BeitModel,
    )
    from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    import PIL
    from PIL import Image

    from transformers import BeitImageProcessor


class BeitModelTester:
    """Builds a tiny BeitConfig plus inputs and shape-checks every Beit head.

    NOTE(review): the obfuscated original referenced `BeitModelTester` and the
    tester's method names from the test class below while defining them under
    mangled names (NameError/AttributeError); names restored throughout.
    """

    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        # The original body hard-codes 100 regardless of the parameter value.
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # [CLS] position is dropped from the masked-image-modeling logits.
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model/pipeline tests for the Beit family."""

    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="BEiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the COCO cats fixture image used by the integration tests."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")

        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
import numpy as np # Importing the Keras libraries and packages import tensorflow as tf from tensorflow.keras import layers, models if __name__ == "__main__": # Initialising the CNN # (Sequential- Building the model layer by layer) _snake_case : Tuple = models.Sequential() # Step 1 - Convolution # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel # (3,3) is the kernel size (filter matrix) classifier.add( layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation="relu") ) # Step 2 - Pooling classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Adding a second convolutional layer classifier.add(layers.ConvaD(32, (3, 3), activation="relu")) classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Step 3 - Flattening classifier.add(layers.Flatten()) # Step 4 - Full connection classifier.add(layers.Dense(units=1_28, activation="relu")) classifier.add(layers.Dense(units=1, activation="sigmoid")) # Compiling the CNN classifier.compile( optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"] ) # Part 2 - Fitting the CNN to the images # Load Trained model weights # from keras.models import load_model # regressor=load_model('cnn.h5') _snake_case : Optional[int] = tf.keras.preprocessing.image.ImageDataGenerator( rescale=1.0 / 2_55, shear_range=0.2, zoom_range=0.2, horizontal_flip=True ) _snake_case : Any = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_55) _snake_case : Dict = train_datagen.flow_from_directory( "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary" ) _snake_case : Dict = test_datagen.flow_from_directory( "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary" ) classifier.fit_generator( training_set, steps_per_epoch=5, epochs=30, validation_data=test_set ) classifier.save("cnn.h5") # Part 3 - Making new predictions _snake_case : int = tf.keras.preprocessing.image.load_img( "dataset/single_prediction/image.png", target_size=(64, 64) ) _snake_case : 
Any = tf.keras.preprocessing.image.img_to_array(test_image) _snake_case : int = np.expand_dims(test_image, axis=0) _snake_case : Optional[Any] = classifier.predict(test_image) # training_set.class_indices if result[0][0] == 0: _snake_case : str = "Normal" if result[0][0] == 1: _snake_case : str = "Abnormality detected"
441
1
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process lowerCamelCase__ = logging.getLogger(__name__) def lowercase_ ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : int ): """simple docstring""" return (preds == labels).mean() @dataclass class _lowerCAmelCase : """simple docstring""" lowerCAmelCase__ =field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) lowerCAmelCase__ =field( default=__UpperCamelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) lowerCAmelCase__ =field( default=__UpperCamelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) lowerCAmelCase__ =field( default=__UpperCamelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) @dataclass class _lowerCAmelCase : """simple docstring""" lowerCAmelCase__ =field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(processors.keys() )} ) lowerCAmelCase__ =field(metadata={'''help''': '''Should contain the data files for the task.'''} ) lowerCAmelCase__ =field( default=128 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. 
Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) lowerCAmelCase__ =field( default=__UpperCamelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) def lowercase_ ( ): """simple docstring""" # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. snake_case__ : Union[str, Any] =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) snake_case__, snake_case__, snake_case__ : List[str] =parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use''' ''' --overwrite_output_dir to overcome.''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('''Training/evaluation parameters %s''' , SCREAMING_SNAKE_CASE ) # Set seed set_seed(training_args.seed ) try: snake_case__ : Optional[int] =processors[data_args.task_name]() snake_case__ : int =processor.get_labels() snake_case__ : List[Any] 
=len(SCREAMING_SNAKE_CASE ) except KeyError: raise ValueError('''Task not found: %s''' % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. snake_case__ : Optional[int] =AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=SCREAMING_SNAKE_CASE , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) snake_case__ : List[str] =AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) snake_case__ : Union[str, Any] =AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , ) # Get datasets snake_case__ : List[str] =( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=SCREAMING_SNAKE_CASE , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) snake_case__ : int =( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=SCREAMING_SNAKE_CASE , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(SCREAMING_SNAKE_CASE : EvalPrediction ) -> Dict: snake_case__ : List[Any] =np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(SCREAMING_SNAKE_CASE , p.label_ids )} # Data collator snake_case__ : int =DataCollatorWithPadding(SCREAMING_SNAKE_CASE , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer snake_case__ : str =Trainer( model=SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , 
train_dataset=SCREAMING_SNAKE_CASE , eval_dataset=SCREAMING_SNAKE_CASE , compute_metrics=SCREAMING_SNAKE_CASE , data_collator=SCREAMING_SNAKE_CASE , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation snake_case__ : Union[str, Any] ={} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) snake_case__ : List[Any] =trainer.evaluate() snake_case__ : Tuple =os.path.join(training_args.output_dir , '''eval_results.txt''' ) if trainer.is_world_master(): with open(SCREAMING_SNAKE_CASE , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key, value in result.items(): logger.info(''' %s = %s''' , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) writer.write('''%s = %s\n''' % (key, value) ) results.update(SCREAMING_SNAKE_CASE ) return results def lowercase_ ( SCREAMING_SNAKE_CASE : List[str] ): """simple docstring""" # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
408
import gc import random import unittest import numpy as np import torch from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel from diffusers.utils import floats_tensor, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _lowerCAmelCase ( __UpperCamelCase , unittest.TestCase ): """simple docstring""" lowerCAmelCase__ =KandinskyVaaPipeline lowerCAmelCase__ =[ '''image_embeds''', '''negative_image_embeds''', ] lowerCAmelCase__ =['''image_embeds''', '''negative_image_embeds'''] lowerCAmelCase__ =[ '''generator''', '''height''', '''width''', '''latents''', '''guidance_scale''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] lowerCAmelCase__ =False @property def UpperCAmelCase ( self ) -> Optional[int]: """simple docstring""" return 32 @property def UpperCAmelCase ( self ) -> Any: """simple docstring""" return 32 @property def UpperCAmelCase ( self ) -> Tuple: """simple docstring""" return self.time_input_dim @property def UpperCAmelCase ( self ) -> Optional[Any]: """simple docstring""" return self.time_input_dim * 4 @property def UpperCAmelCase ( self ) -> Optional[Any]: """simple docstring""" return 100 @property def UpperCAmelCase ( self ) -> Tuple: """simple docstring""" torch.manual_seed(0 ) snake_case__ : Any ={ '''in_channels''': 4, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''image''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': 
(self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } snake_case__ : Union[str, Any] =UNetaDConditionModel(**__SCREAMING_SNAKE_CASE ) return model @property def UpperCAmelCase ( self ) -> List[str]: """simple docstring""" return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def UpperCAmelCase ( self ) -> Tuple: """simple docstring""" torch.manual_seed(0 ) snake_case__ : List[Any] =VQModel(**self.dummy_movq_kwargs ) return model def UpperCAmelCase ( self ) -> Optional[int]: """simple docstring""" snake_case__ : Union[str, Any] =self.dummy_unet snake_case__ : Dict =self.dummy_movq snake_case__ : Any =DDIMScheduler( num_train_timesteps=1000 , beta_schedule='''linear''' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=__SCREAMING_SNAKE_CASE , set_alpha_to_one=__SCREAMING_SNAKE_CASE , steps_offset=1 , prediction_type='''epsilon''' , thresholding=__SCREAMING_SNAKE_CASE , ) snake_case__ : List[str] ={ '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ) -> List[str]: """simple docstring""" snake_case__ : Optional[Any] =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE ) snake_case__ : str =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( 
__SCREAMING_SNAKE_CASE ) if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ): snake_case__ : List[str] =torch.manual_seed(__SCREAMING_SNAKE_CASE ) else: snake_case__ : Any =torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE ) snake_case__ : List[str] ={ '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''generator''': generator, '''height''': 64, '''width''': 64, '''guidance_scale''': 4.0, '''num_inference_steps''': 2, '''output_type''': '''np''', } return inputs def UpperCAmelCase ( self ) -> Any: """simple docstring""" snake_case__ : int ='''cpu''' snake_case__ : Dict =self.get_dummy_components() snake_case__ : List[Any] =self.pipeline_class(**__SCREAMING_SNAKE_CASE ) snake_case__ : Optional[Any] =pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) snake_case__ : Optional[Any] =pipe(**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) ) snake_case__ : Union[str, Any] =output.images snake_case__ : List[str] =pipe( **self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) , return_dict=__SCREAMING_SNAKE_CASE , )[0] snake_case__ : List[Any] =image[0, -3:, -3:, -1] snake_case__ : Tuple =image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) snake_case__ : List[Any] =np.array( [0.623_7976, 1.0, 0.3644_1332, 1.0, 0.7063_9634, 0.2987_7186, 0.8565_2125, 0.521_6843, 0.5445_4046] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase ( self ) -> str: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase ( self ) -> Optional[int]: """simple 
docstring""" snake_case__ : str =load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy''' ) snake_case__ : int =KandinskyVaaPriorPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(__SCREAMING_SNAKE_CASE ) snake_case__ : List[Any] =KandinskyVaaPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa ) snake_case__ : Optional[Any] =pipeline.to(__SCREAMING_SNAKE_CASE ) pipeline.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) snake_case__ : int ='''red cat, 4k photo''' snake_case__ : Dict =torch.Generator(device='''cuda''' ).manual_seed(0 ) snake_case__, snake_case__ : Optional[Any] =pipe_prior( __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple() snake_case__ : List[str] =torch.Generator(device='''cuda''' ).manual_seed(0 ) snake_case__ : Any =pipeline( image_embeds=__SCREAMING_SNAKE_CASE , negative_image_embeds=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=100 , output_type='''np''' , ) snake_case__ : Optional[int] =output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
408
1
import importlib import shutil import threading import warnings from typing import List import fsspec import fsspec.asyn from . import compression from .hffilesystem import HfFileSystem A__ = importlib.util.find_spec('''s3fs''') is not None if _has_safs: from .safilesystem import SaFileSystem # noqa: F401 A__ = [ compression.BzaFileSystem, compression.GzipFileSystem, compression.LzaFileSystem, compression.XzFileSystem, compression.ZstdFileSystem, ] # Register custom filesystems for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]: if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class: warnings.warn(f"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""") fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True) def _lowerCAmelCase ( __lowerCAmelCase ) -> Union[str, Any]: """simple docstring""" if "://" in dataset_path: snake_case__ : Any = dataset_path.split('''://''' )[1] return dataset_path def _lowerCAmelCase ( __lowerCAmelCase ) -> Any: """simple docstring""" if fs is not None and fs.protocol != "file": return True else: return False def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Dict: """simple docstring""" snake_case__ : str = not is_remote_filesystem(__a ) if is_local: # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory shutil.move(fs._strip_protocol(__a ) , fs._strip_protocol(__a ) ) else: fs.mv(__a , __a , recursive=__a ) def _lowerCAmelCase ( ) -> Any: """simple docstring""" if hasattr(fsspec.asyn , '''reset_lock''' ): # for future fsspec>2022.05.0 fsspec.asyn.reset_lock() else: snake_case__ : Tuple = None snake_case__ : Optional[int] = None snake_case__ : Dict = threading.Lock()
252
"""simple docstring""" import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version('>=', FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType lowerCAmelCase__ = get_logger(__name__) def _lowerCamelCase ( __a, __a, __a, __a, __a=0 ): os.makedirs(__a, exist_ok=__a ) with FSDP.state_dict_type( __a, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ): SCREAMING_SNAKE_CASE_ = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: SCREAMING_SNAKE_CASE_ = F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin' SCREAMING_SNAKE_CASE_ = os.path.join(__a, __a ) if accelerator.process_index == 0: logger.info(F'Saving model to {output_model_file}' ) torch.save(__a, __a ) logger.info(F'Model saved to {output_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: SCREAMING_SNAKE_CASE_ = ( F'{MODEL_NAME}_rank{accelerator.process_index}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin' ) SCREAMING_SNAKE_CASE_ = os.path.join(__a, __a ) logger.info(F'Saving model to {output_model_file}' ) torch.save(__a, __a ) logger.info(F'Model saved to {output_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: SCREAMING_SNAKE_CASE_ = os.path.join(__a, F'{MODEL_NAME}_{model_index}' ) os.makedirs(__a, exist_ok=__a ) logger.info(F'Saving model to {ckpt_dir}' ) SCREAMING_SNAKE_CASE_ = {'''model''': state_dict} 
dist_cp.save_state_dict( state_dict=__a, storage_writer=dist_cp.FileSystemWriter(__a ), planner=DefaultSavePlanner(), ) logger.info(F'Model saved to {ckpt_dir}' ) def _lowerCamelCase ( __a, __a, __a, __a, __a=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( __a, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(__a ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( '''Set the `sync_module_states` flag to `True` so that model states are synced across processes when ''' '''initializing FSDP object''' ) return SCREAMING_SNAKE_CASE_ = F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin' SCREAMING_SNAKE_CASE_ = os.path.join(__a, __a ) logger.info(F'Loading model from {input_model_file}' ) SCREAMING_SNAKE_CASE_ = torch.load(__a ) logger.info(F'Model loaded from {input_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: SCREAMING_SNAKE_CASE_ = ( F'{MODEL_NAME}_rank{accelerator.process_index}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin' ) SCREAMING_SNAKE_CASE_ = os.path.join(__a, __a ) logger.info(F'Loading model from {input_model_file}' ) SCREAMING_SNAKE_CASE_ = torch.load(__a ) logger.info(F'Model loaded from {input_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: SCREAMING_SNAKE_CASE_ = ( os.path.join(__a, F'{MODEL_NAME}_{model_index}' ) if F'{MODEL_NAME}' not in input_dir else input_dir ) logger.info(F'Loading model from {ckpt_dir}' ) SCREAMING_SNAKE_CASE_ = {'''model''': model.state_dict()} dist_cp.load_state_dict( state_dict=__a, storage_reader=dist_cp.FileSystemReader(__a ), planner=DefaultLoadPlanner(), ) SCREAMING_SNAKE_CASE_ = state_dict['''model'''] logger.info(F'Model loaded from {ckpt_dir}' ) model.load_state_dict(__a ) def 
_lowerCamelCase ( __a, __a, __a, __a, __a, __a=0 ): os.makedirs(__a, exist_ok=__a ) with FSDP.state_dict_type( __a, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ): SCREAMING_SNAKE_CASE_ = FSDP.optim_state_dict(__a, __a ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: SCREAMING_SNAKE_CASE_ = ( F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin' ) SCREAMING_SNAKE_CASE_ = os.path.join(__a, __a ) logger.info(F'Saving Optimizer state to {output_optimizer_file}' ) torch.save(__a, __a ) logger.info(F'Optimizer state saved in {output_optimizer_file}' ) else: SCREAMING_SNAKE_CASE_ = os.path.join(__a, F'{OPTIMIZER_NAME}_{optimizer_index}' ) os.makedirs(__a, exist_ok=__a ) logger.info(F'Saving Optimizer state to {ckpt_dir}' ) dist_cp.save_state_dict( state_dict={'''optimizer''': optim_state}, storage_writer=dist_cp.FileSystemWriter(__a ), planner=DefaultSavePlanner(), ) logger.info(F'Optimizer state saved in {ckpt_dir}' ) def _lowerCamelCase ( __a, __a, __a, __a, __a, __a=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( __a, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: SCREAMING_SNAKE_CASE_ = None # below check should work but currently it isn't working (mostly opytorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: SCREAMING_SNAKE_CASE_ = ( F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin' ) SCREAMING_SNAKE_CASE_ = os.path.join(__a, __a ) logger.info(F'Loading Optimizer state from {input_optimizer_file}' ) SCREAMING_SNAKE_CASE_ = torch.load(__a ) logger.info(F'Optimizer state loaded from {input_optimizer_file}' ) else: 
SCREAMING_SNAKE_CASE_ = ( os.path.join(__a, F'{OPTIMIZER_NAME}_{optimizer_index}' ) if F'{OPTIMIZER_NAME}' not in input_dir else input_dir ) logger.info(F'Loading Optimizer from {ckpt_dir}' ) SCREAMING_SNAKE_CASE_ = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict(), optimizer_key='''optimizer''', storage_reader=dist_cp.FileSystemReader(__a ), ) SCREAMING_SNAKE_CASE_ = optim_state['''optimizer'''] logger.info(F'Optimizer loaded from {ckpt_dir}' ) SCREAMING_SNAKE_CASE_ = FSDP.optim_state_dict_to_load(__a, __a, __a ) optimizer.load_state_dict(__a )
626
0
"""simple docstring""" from math import factorial SCREAMING_SNAKE_CASE : dict[str, int] = {str(digit): factorial(digit) for digit in range(10)} def lowercase ( _snake_case : int ) ->int: """simple docstring""" if not isinstance(_snake_case , _snake_case ): raise TypeError('''Parameter number must be int''' ) if number < 0: raise ValueError('''Parameter number must be greater than or equal to 0''' ) # Converts number in string to iterate on its digits and adds its factorial. return sum(DIGIT_FACTORIAL[digit] for digit in str(_snake_case ) ) def lowercase ( _snake_case : int = 60 , _snake_case : int = 1_000_000 ) ->int: """simple docstring""" if not isinstance(_snake_case , _snake_case ) or not isinstance(_snake_case , _snake_case ): raise TypeError('''Parameters chain_length and number_limit must be int''' ) if chain_length <= 0 or number_limit <= 0: raise ValueError( '''Parameters chain_length and number_limit must be greater than 0''' ) # the counter for the chains with the exact desired length __snake_case : Dict = 0 # the cached sizes of the previous chains __snake_case : dict[int, int] = {} for start_chain_element in range(1 , _snake_case ): # The temporary set will contain the elements of the chain __snake_case : Any = set() __snake_case : Optional[int] = 0 # Stop computing the chain when you find a cached size, a repeating item or the # length is greater then the desired one. 
__snake_case : Tuple = start_chain_element while ( chain_element not in chain_sets_lengths and chain_element not in chain_set and chain_set_length <= chain_length ): chain_set.add(_snake_case ) chain_set_length += 1 __snake_case : Optional[int] = digit_factorial_sum(_snake_case ) if chain_element in chain_sets_lengths: chain_set_length += chain_sets_lengths[chain_element] __snake_case : Optional[Any] = chain_set_length # If chain contains the exact amount of elements increase the counter if chain_set_length == chain_length: chains_counter += 1 return chains_counter if __name__ == "__main__": import doctest doctest.testmod() print(F'{solution()}')
229
"""simple docstring""" import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : int = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) SCREAMING_SNAKE_CASE : Dict = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class _UpperCAmelCase : '''simple docstring''' lowerCamelCase__ =field( default=__snake_case, metadata={'help': 'Model type selected in the list: ' + ', '.join(__snake_case )} ) lowerCamelCase__ =field( default=__snake_case, metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} ) lowerCamelCase__ =field( default=128, metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) }, ) lowerCamelCase__ =field( default=128, metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'}, ) lowerCamelCase__ =field( default=64, metadata={ 'help': ( 'The maximum number of tokens for the question. Questions longer than this will ' 'be truncated to this length.' ) }, ) lowerCamelCase__ =field( default=30, metadata={ 'help': ( 'The maximum length of an answer that can be generated. This is needed because the start ' 'and end predictions are not conditioned on one another.' 
) }, ) lowerCamelCase__ =field( default=__snake_case, metadata={'help': 'Overwrite the cached training and evaluation sets'} ) lowerCamelCase__ =field( default=__snake_case, metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} ) lowerCamelCase__ =field( default=0.0, metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} ) lowerCamelCase__ =field( default=20, metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} ) lowerCamelCase__ =field( default=0, metadata={ 'help': ( 'language id of input for language-specific xlm models (see' ' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)' ) }, ) lowerCamelCase__ =field(default=1, metadata={'help': 'multiple threads for converting example to features'} ) class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ ='train' lowerCamelCase__ ='dev' class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ =42 lowerCamelCase__ =42 lowerCamelCase__ =42 lowerCamelCase__ =42 def __init__(self , a_ , a_ , a_ = None , a_ = Split.train , a_ = False , a_ = None , a_ = "pt" , ): '''simple docstring''' __snake_case : Any = args __snake_case : Dict = is_language_sensitive __snake_case : Optional[int] = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(a_ , a_ ): try: __snake_case : str = Split[mode] except KeyError: raise KeyError('''mode is not a valid split name''' ) __snake_case : Union[str, Any] = mode # Load data features from cache or dataset file __snake_case : Optional[int] = '''v2''' if args.version_2_with_negative else '''v1''' __snake_case : int = os.path.join( cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}""" , ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
__snake_case : Union[str, Any] = cached_features_file + '''.lock''' with FileLock(a_ ): if os.path.exists(a_ ) and not args.overwrite_cache: __snake_case : Optional[int] = time.time() __snake_case : Dict = torch.load(a_ ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. __snake_case : Optional[int] = self.old_features['''features'''] __snake_case : Union[str, Any] = self.old_features.get('''dataset''' , a_ ) __snake_case : Dict = self.old_features.get('''examples''' , a_ ) logger.info( f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( f"""Deleting cached file {cached_features_file} will allow dataset and examples to be cached in""" ''' future run''' ) else: if mode == Split.dev: __snake_case : Optional[int] = self.processor.get_dev_examples(args.data_dir ) else: __snake_case : List[Any] = self.processor.get_train_examples(args.data_dir ) __snake_case , __snake_case : Optional[int] = squad_convert_examples_to_features( examples=self.examples , tokenizer=a_ , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=a_ , ) __snake_case : Optional[Any] = time.time() torch.save( {'''features''': self.features, '''dataset''': self.dataset, '''examples''': self.examples} , a_ , ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. 
logger.info( f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" ) def __len__(self ): '''simple docstring''' return len(self.features ) def __getitem__(self , a_ ): '''simple docstring''' __snake_case : List[str] = self.features[i] __snake_case : str = torch.tensor(feature.input_ids , dtype=torch.long ) __snake_case : Any = torch.tensor(feature.attention_mask , dtype=torch.long ) __snake_case : Optional[Any] = torch.tensor(feature.token_type_ids , dtype=torch.long ) __snake_case : Any = torch.tensor(feature.cls_index , dtype=torch.long ) __snake_case : Tuple = torch.tensor(feature.p_mask , dtype=torch.float ) __snake_case : Union[str, Any] = torch.tensor(feature.is_impossible , dtype=torch.float ) __snake_case : Union[str, Any] = { '''input_ids''': input_ids, '''attention_mask''': attention_mask, '''token_type_ids''': token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({'''cls_index''': cls_index, '''p_mask''': p_mask} ) if self.args.version_2_with_negative: inputs.update({'''is_impossible''': is_impossible} ) if self.is_language_sensitive: inputs.update({'''langs''': (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: __snake_case : int = torch.tensor(feature.start_position , dtype=torch.long ) __snake_case : str = torch.tensor(feature.end_position , dtype=torch.long ) inputs.update({'''start_positions''': start_positions, '''end_positions''': end_positions} ) return inputs
229
1
import argparse import json import os import re import torch from transformers import BloomConfig, BloomModel from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ : Tuple =[ 'word_embeddings_layernorm.weight', 'word_embeddings_layernorm.bias', 'input_layernorm.weight', 'input_layernorm.bias', 'post_attention_layernorm.weight', 'post_attention_layernorm.bias', 'self_attention.dense.bias', 'mlp.dense_4h_to_h.bias', 'ln_f.weight', 'ln_f.bias', ] lowerCAmelCase__ : Optional[Any] =[ 'mlp.dense_4h_to_h.weight', 'self_attention.dense.weight', ] def a__ ( A__, A__ ): SCREAMING_SNAKE_CASE_ : str = { 'word_embeddings.weight': 'word_embeddings.weight', 'word_embeddings.norm.weight': 'word_embeddings_layernorm.weight', 'word_embeddings.norm.bias': 'word_embeddings_layernorm.bias', 'weight': 'ln_f.weight', 'bias': 'ln_f.bias', } if key in layer_rename_map: return layer_rename_map[key] # Handle transformer blocks SCREAMING_SNAKE_CASE_ : Optional[Any] = int(re.match(r'.*layer_(\d*).*', A__ )[1] ) layer_number -= 3 return F'''h.{layer_number}.''' + key def a__ ( A__ ): if dtype == torch.bool: return 1 / 8 SCREAMING_SNAKE_CASE_ : str = re.search(r'[^\d](\d+)$', str(A__ ) ) if bit_search is None: raise ValueError(F'''`dtype` is not a valid dtype: {dtype}.''' ) SCREAMING_SNAKE_CASE_ : Dict = int(bit_search.groups()[0] ) return bit_size // 8 def a__ ( A__, A__, A__, A__, A__ ): # Construct model if bloom_config_file == "": SCREAMING_SNAKE_CASE_ : Optional[int] = BloomConfig() else: SCREAMING_SNAKE_CASE_ : Optional[Any] = BloomConfig.from_json_file(A__ ) if shard_model: SCREAMING_SNAKE_CASE_ : str = os.listdir(A__ ) SCREAMING_SNAKE_CASE_ : str = sorted(filter(lambda A__ : s.startswith('layer' ) and "model_00" in s, A__ ) ) SCREAMING_SNAKE_CASE_ : Tuple = {'weight_map': {}, 'metadata': {}} SCREAMING_SNAKE_CASE_ : int = 0 SCREAMING_SNAKE_CASE_ : str = None SCREAMING_SNAKE_CASE_ : Union[str, Any] 
= BloomConfig() for j, file in enumerate(A__ ): print('Processing file: {}'.format(A__ ) ) SCREAMING_SNAKE_CASE_ : Dict = None for i in range(A__ ): # load all TP files SCREAMING_SNAKE_CASE_ : Optional[Any] = file.replace('model_00', F'''model_0{i}''' ) SCREAMING_SNAKE_CASE_ : List[Any] = torch.load(os.path.join(A__, A__ ), map_location='cpu' ) # Rename keys in the transformers names SCREAMING_SNAKE_CASE_ : Dict = list(temp.keys() ) for key in keys: SCREAMING_SNAKE_CASE_ : Optional[Any] = temp.pop(A__ ) if tensors is None: SCREAMING_SNAKE_CASE_ : Tuple = temp else: for key in tensors.keys(): if any(key.endswith(A__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425) tensors[key] += temp[key] else: # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel SCREAMING_SNAKE_CASE_ : int = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0 # We concatenate these weights accross TP ranks SCREAMING_SNAKE_CASE_ : Optional[int] = torch.cat([tensors[key], temp[key]], dim=A__ ) # Divide by the number of TP the weights we want to average for key in tensors.keys(): if any(key.endswith(A__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): SCREAMING_SNAKE_CASE_ : Optional[int] = tensors[key] / pretraining_tp torch.save( A__, os.path.join( A__, 'pytorch_model_{}-of-{}.bin'.format(str(j + 1 ).zfill(5 ), str(len(A__ ) ).zfill(5 ) ), ), ) for key in tensors.keys(): SCREAMING_SNAKE_CASE_ : str = tensors[key] total_size += value.numel() * get_dtype_size(value.dtype ) if key not in index_dict["weight_map"]: SCREAMING_SNAKE_CASE_ : List[Any] = 'pytorch_model_{}-of-{}.bin'.format( str(j + 1 ).zfill(5 ), str(len(A__ ) ).zfill(5 ) ) SCREAMING_SNAKE_CASE_ : Any = BloomConfig() SCREAMING_SNAKE_CASE_ : str = pytorch_dump_folder_path + '/' + CONFIG_NAME 
SCREAMING_SNAKE_CASE_ : Union[str, Any] = total_size with open(A__, 'w', encoding='utf-8' ) as f: f.write(config.to_json_string() ) with open(os.path.join(A__, WEIGHTS_NAME + '.index.json' ), 'w', encoding='utf-8' ) as f: SCREAMING_SNAKE_CASE_ : Dict = json.dumps(A__, indent=2, sort_keys=A__ ) + '\n' f.write(A__ ) else: SCREAMING_SNAKE_CASE_ : Tuple = BloomModel(A__ ) SCREAMING_SNAKE_CASE_ : int = os.listdir(A__ ) SCREAMING_SNAKE_CASE_ : Tuple = sorted(filter(lambda A__ : s.startswith('layer' ) and "model_00" in s, A__ ) ) SCREAMING_SNAKE_CASE_ : Tuple = None for i, file in enumerate(A__ ): SCREAMING_SNAKE_CASE_ : Dict = None for i in range(A__ ): # load all TP files SCREAMING_SNAKE_CASE_ : Dict = file.replace('model_00', F'''model_0{i}''' ) SCREAMING_SNAKE_CASE_ : Tuple = torch.load(os.path.join(A__, A__ ), map_location='cpu' ) # Rename keys in the transformers names SCREAMING_SNAKE_CASE_ : str = list(temp.keys() ) for key in keys: SCREAMING_SNAKE_CASE_ : int = temp.pop(A__ ) if tensors is None: SCREAMING_SNAKE_CASE_ : List[Any] = temp else: for key in tensors.keys(): # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425) if any(key.endswith(A__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): tensors[key] += temp[key] else: # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel SCREAMING_SNAKE_CASE_ : Dict = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0 # We concatenate these weights accross TP ranks SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.cat([tensors[key], temp[key]], dim=A__ ) # Divide by the number of TP the weights we want to average for key in tensors.keys(): if any(key.endswith(A__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): SCREAMING_SNAKE_CASE_ : Any = tensors[key] / pretraining_tp SCREAMING_SNAKE_CASE_ : Any = model.load_state_dict(A__, strict=A__ ) assert 
not other_keys.unexpected_keys, F'''The keys {other_keys.unexpected_keys} are unexpected''' if missing_keys is None: SCREAMING_SNAKE_CASE_ : Tuple = set(other_keys.missing_keys ) else: SCREAMING_SNAKE_CASE_ : str = missing_keys.intersection(set(other_keys.missing_keys ) ) assert not missing_keys, F'''The keys {missing_keys} are missing''' # Save pytorch-model os.makedirs(A__, exist_ok=A__ ) SCREAMING_SNAKE_CASE_ : Any = pytorch_dump_folder_path + '/' + WEIGHTS_NAME SCREAMING_SNAKE_CASE_ : List[str] = pytorch_dump_folder_path + '/' + CONFIG_NAME print(F'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''' ) if config.torch_dtype is not None: SCREAMING_SNAKE_CASE_ : Union[str, Any] = model.to(config.torch_dtype ) torch.save(model.state_dict(), A__ ) print(F'''Save configuration file to {pytorch_config_dump_path}''' ) with open(A__, 'w', encoding='utf-8' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": lowerCAmelCase__ : Optional[Any] =argparse.ArgumentParser() # Required parameters parser.add_argument( '--bloom_checkpoint_path', default=None, type=str, required=True, help='Path to the Megatron-LM checkpoint path.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--bloom_config_file', default='', type=str, help=( 'An optional config json file corresponding to the pre-trained model. \n' 'This specifies the model architecture.' 
), ) parser.add_argument( '--shard_model', action='store_true', help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint', ) parser.add_argument( '--pretraining_tp', default=4, type=int, help='Pretraining TP rank that has been used when training the model in Megatron-LM \n', ) lowerCAmelCase__ : List[str] =parser.parse_args() convert_bloom_checkpoint_to_pytorch( args.bloom_checkpoint_path, args.bloom_config_file, args.pytorch_dump_folder_path, args.shard_model, args.pretraining_tp, )
101
'''simple docstring''' # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from packaging import version from .. import __version__ from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD from .doc import ( add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, copy_func, replace_return_docstrings, ) from .generic import ( ContextManagers, ExplicitEnum, ModelOutput, PaddingStrategy, TensorType, add_model_info_to_auto_map, cached_property, can_return_loss, expand_dims, find_labels, flatten_dict, infer_framework, is_jax_tensor, is_numpy_array, is_tensor, is_tf_symbolic_tensor, is_tf_tensor, is_torch_device, is_torch_dtype, is_torch_tensor, reshape, squeeze, strtobool, tensor_size, to_numpy, to_py_obj, transpose, working_or_temp_dir, ) from .hub import ( CLOUDFRONT_DISTRIB_PREFIX, DISABLE_TELEMETRY, HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, S3_BUCKET_PREFIX, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, EntryNotFoundError, PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, cached_file, default_cache_path, define_sagemaker_information, download_url, extract_commit_hash, get_cached_models, get_file_from_repo, get_full_repo_name, has_file, http_user_agent, is_offline_mode, is_remote_url, move_cache, 
send_example_telemetry, try_to_load_from_cache, ) from .import_utils import ( ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, USE_JAX, USE_TF, USE_TORCH, DummyObject, OptionalDependencyNotAvailable, _LazyModule, ccl_version, direct_transformers_import, get_torch_version, is_accelerate_available, is_apex_available, is_bitsandbytes_available, is_bsa_available, is_coloredlogs_available, is_cython_available, is_datasets_available, is_decord_available, is_detectrona_available, is_faiss_available, is_flax_available, is_ftfy_available, is_in_notebook, is_ipex_available, is_jieba_available, is_jumanpp_available, is_kenlm_available, is_keras_nlp_available, is_librosa_available, is_natten_available, is_ninja_available, is_onnx_available, is_openai_available, is_optimum_available, is_pandas_available, is_peft_available, is_phonemizer_available, is_protobuf_available, is_psutil_available, is_pyanvml_available, is_pyctcdecode_available, is_pytesseract_available, is_pytest_available, is_pytorch_quantization_available, is_rjieba_available, is_sacremoses_available, is_safetensors_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_scipy_available, is_sentencepiece_available, is_seqio_available, is_sklearn_available, is_soundfile_availble, is_spacy_available, is_speech_available, is_sudachi_available, is_tensorflow_probability_available, is_tensorflow_text_available, is_tfaonnx_available, is_tf_available, is_timm_available, is_tokenizers_available, is_torch_available, is_torch_bfaa_available, is_torch_bfaa_cpu_available, is_torch_bfaa_gpu_available, is_torch_compile_available, is_torch_cuda_available, is_torch_fx_available, is_torch_fx_proxy, is_torch_mps_available, is_torch_neuroncore_available, is_torch_tensorrt_fx_available, is_torch_tfaa_available, is_torch_tpu_available, is_torchaudio_available, is_torchdistx_available, is_torchdynamo_available, is_torchvision_available, is_training_run_on_sagemaker, is_vision_available, 
requires_backends, torch_only_method, ) __lowerCAmelCase = "pytorch_model.bin" __lowerCAmelCase = "pytorch_model.bin.index.json" __lowerCAmelCase = "adapter_config.json" __lowerCAmelCase = "adapter_model.bin" __lowerCAmelCase = "adapter_model.safetensors" __lowerCAmelCase = "tf_model.h5" __lowerCAmelCase = "tf_model.h5.index.json" __lowerCAmelCase = "model.ckpt" __lowerCAmelCase = "flax_model.msgpack" __lowerCAmelCase = "flax_model.msgpack.index.json" __lowerCAmelCase = "model.safetensors" __lowerCAmelCase = "model.safetensors.index.json" __lowerCAmelCase = "config.json" __lowerCAmelCase = "preprocessor_config.json" __lowerCAmelCase = FEATURE_EXTRACTOR_NAME __lowerCAmelCase = "generation_config.json" __lowerCAmelCase = "modelcard.json" __lowerCAmelCase = "▁" __lowerCAmelCase = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility __lowerCAmelCase = [ [[0, 1, 0, 1], [1, 0, 0, 1]] ] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too. __lowerCAmelCase = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]] __lowerCAmelCase = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]] def __UpperCamelCase ( lowercase_ : Optional[int] ): """simple docstring""" if version.parse(lowercase_ ) < version.parse(lowercase_ ): if "dev" in min_version: a_ = ( 'This example requires a source install from HuggingFace Transformers (see ' '`https://huggingface.co/docs/transformers/installation#install-from-source`),' ) else: a_ = F'This example requires a minimum version of {min_version},' error_message += F' but the version found is {__version__}.\n' raise ImportError( error_message + 'Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other ' 'versions of HuggingFace Transformers.' )
536
0
from __future__ import annotations

import os
from collections.abc import Mapping

# An undirected edge is stored as a (vertex, vertex) tuple.
_lowercase = tuple[int, int]


class _UpperCAmelCase:
    """An undirected, weighted graph with a Prim's-algorithm MST builder."""

    def __init__(self, vertices: set[int], edges: Mapping[_lowercase, int]) -> None:
        """Create a graph from a vertex set and an edge -> weight mapping.

        Fix: the mangled source gave both parameters the same name (a
        SyntaxError).  Edge keys are normalised to (min, max) so each
        undirected edge has one canonical representation.
        """
        self.vertices: set[int] = vertices
        self.edges: dict[_lowercase, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: _lowercase, weight: int) -> None:
        """Add (or overwrite) an undirected edge with the given weight.

        Fix: restored the name `add_edge`, which `prims_algorithm` calls;
        under the mangling the method was shadowed and unreachable.
        """
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self):
        """Return a minimum spanning tree of this graph as a new graph.

        Grows the tree from the minimum vertex, repeatedly adding the
        lightest edge that crosses the tree boundary.  Assumes the graph
        is connected.
        """
        subgraph = _UpperCAmelCase({min(self.vertices)}, {})
        while len(subgraph.vertices) < len(self.vertices):
            # Sentinel strictly larger than every real edge weight.
            min_weight = max(self.edges.values()) + 1
            min_edge: _lowercase | None = None
            for edge, weight in self.edges.items():
                # XOR: exactly one endpoint is already inside the growing tree.
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph


# Restore the pre-mangling public name referenced by the code below.
Graph = _UpperCAmelCase


def lowerCAmelCase__(UpperCamelCase_: str = "p107_network.txt") -> int:
    """Project Euler 107: maximum saving achieved by replacing the network
    with its minimum spanning tree.

    Returns total weight of all edges minus the MST's total weight.
    """
    # NOTE(review): the mangled source passed the *filename* to
    # os.path.dirname; resolving the data file relative to this module's
    # directory is the intended behaviour — confirm.
    base_dir = os.path.abspath(os.path.dirname(__file__))
    data_path = os.path.join(base_dir, UpperCamelCase_)

    edges: dict[_lowercase, int] = {}
    with open(data_path) as f:
        rows = f.read().strip().split("\n")
    adjacency = [row.split(",") for row in rows]

    # Only the lower triangle is needed: the matrix is symmetric and '-'
    # marks a missing edge.
    for edge_a in range(1, len(adjacency)):
        for edge_b in range(edge_a):
            if adjacency[edge_a][edge_b] != "-":
                edges[(edge_b, edge_a)] = int(adjacency[edge_a][edge_b])

    graph = Graph(set(range(len(adjacency))), edges)
    subgraph = graph.prims_algorithm()

    initial_total = sum(graph.edges.values())
    optimal_total = sum(subgraph.edges.values())
    return initial_total - optimal_total


# Restore the name the demo below prints.
solution = lowerCAmelCase__

if __name__ == "__main__":
    print(f"{solution() = }")
526
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class _UpperCAmelCase(PipelineTool):
    """Tool that produces an English text description of an input image.

    Fix: the base class was mangled to the undefined name ``A__``; the only
    plausible base is ``PipelineTool``, which is imported above and provides
    the ``self.pre_processor`` / ``self.model`` attributes used below.
    """

    # NOTE(review): all class attributes and the encode/forward/decode hooks
    # had been mangled to a single repeated identifier, so each assignment
    # shadowed the previous one.  The names below follow the PipelineTool
    # attribute/hook contract — confirm against the base class.
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVisionaSeq
    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        # The tool needs PIL/vision support before the base class loads anything.
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image.Image"):
        """Turn the raw image into model-ready tensors."""
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        """Run generation on the encoded inputs."""
        return self.model.generate(**inputs)

    def decode(self, outputs):
        """Decode generated token ids into a clean caption string."""
        # skip_special_tokens=True: the mangled source passed the outputs
        # tensor itself here, which cannot be intended.
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
526
1
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class SCREAMING_SNAKE_CASE__ : @staticmethod def A__ ( *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : Tuple ): """simple docstring""" pass @is_pipeline_test @require_vision class SCREAMING_SNAKE_CASE__ (unittest.TestCase ): @require_torch def A__ ( self : Union[str, Any] ): """simple docstring""" lowerCAmelCase__ = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , ) lowerCAmelCase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCAmelCase__ = image_classifier(__lowerCamelCase , candidate_labels=['''a''', '''b''', '''c'''] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. 
self.assertIn( nested_simplify(__lowerCamelCase ) , [ [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}], [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}], ] , ) lowerCAmelCase__ = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [ [ {'''score''': 0.333, '''label''': ANY(__lowerCamelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCamelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCamelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCamelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCamelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCamelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCamelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCamelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCamelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCamelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCamelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCamelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCamelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCamelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCamelCase )}, ], ] , ) @require_tf def A__ ( self : Optional[int] ): """simple docstring""" lowerCAmelCase__ = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' ) lowerCAmelCase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCAmelCase__ = image_classifier(__lowerCamelCase , candidate_labels=['''a''', '''b''', '''c'''] ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , ) 
lowerCAmelCase__ = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [ [ {'''score''': 0.333, '''label''': ANY(__lowerCamelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCamelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCamelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCamelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCamelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCamelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCamelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCamelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCamelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCamelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCamelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCamelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowerCamelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCamelCase )}, {'''score''': 0.333, '''label''': ANY(__lowerCamelCase )}, ], ] , ) @slow @require_torch def A__ ( self : Union[str, Any] ): """simple docstring""" lowerCAmelCase__ = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , ) # This is an image of 2 cats with remotes and no planes lowerCAmelCase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCAmelCase__ = image_classifier(__lowerCamelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) lowerCAmelCase__ = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 
0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , ) @slow @require_tf def A__ ( self : Tuple ): """simple docstring""" lowerCAmelCase__ = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' ) # This is an image of 2 cats with remotes and no planes lowerCAmelCase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCAmelCase__ = image_classifier(__lowerCamelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) lowerCAmelCase__ = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowerCamelCase ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , )
615
from __future__ import annotations


def a_(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """Return every way ``target`` can be assembled by concatenating words
    from ``word_bank`` (words may be reused).

    Classic "all construct" dynamic programme: ``table[i]`` holds every
    (reversed) combination that builds ``target[:i]``.

    Fix: the original parameter list declared the same mangled name twice
    (a SyntaxError); the names are restored from the demo call sites below.
    """
    bank = list(word_bank) if word_bank else []

    # table[i] -> list of reversed word combinations producing target[:i]
    table: list[list[list[str]]] = [[] for _ in range(len(target) + 1)]
    table[0].append([])  # the empty prefix has exactly one (empty) construction

    for start in range(len(target)):
        ways = table[start]
        if not ways:
            continue  # target[:start] is not constructible; nothing to extend
        for word in bank:
            # Equivalent to target[start:start + len(word)] == word.
            if target.startswith(word, start):
                # Prepend the word; combinations are un-reversed at the end.
                table[start + len(word)].extend([[word, *way] for way in ways])

    result = table[len(target)]
    for combination in result:
        combination.reverse()
    return result


# Restore the public name the demo below calls (it was undefined after mangling).
all_construct = a_

if __name__ == "__main__":
    print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
    print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
    print(
        all_construct(
            "hexagonosaurus",
            ["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
        )
    )
615
1
import argparse

from transformers import (
    BigBirdConfig,
    BigBirdForPreTraining,
    BigBirdForQuestionAnswering,
    load_tf_weights_in_big_bird,
)
from transformers.utils import logging


logging.set_verbosity_info()


def UpperCAmelCase_(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    """Convert a TensorFlow BigBird checkpoint into a PyTorch checkpoint.

    Fix: the original signature declared the same mangled name for all four
    parameters (a SyntaxError); the names are restored from the argparse
    call at the bottom of the file.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        big_bird_config_file: JSON config describing the model architecture.
        pytorch_dump_path: where the converted PyTorch model is saved.
        is_trivia_qa: if True, build a question-answering head instead of the
            pre-training head.
    """
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


# Restore the public name the __main__ block calls (undefined after mangling).
convert_tf_checkpoint_to_pytorch = UpperCAmelCase_

if __name__ == "__main__":
    # Keep the mangled module-level alias for backward compatibility.
    parser = UpperCAmelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--big_bird_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
    )
    args = UpperCAmelCase = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
565
import torch from diffusers import UnCLIPScheduler from .test_schedulers import SchedulerCommonTest class A_ ( __lowerCamelCase ): '''simple docstring''' _UpperCamelCase : Optional[int] = (UnCLIPScheduler,) def SCREAMING_SNAKE_CASE__ ( self , **snake_case ): lowercase = { 'num_train_timesteps': 1000, 'variance_type': 'fixed_small_log', 'clip_sample': True, 'clip_sample_range': 1.0, 'prediction_type': 'epsilon', } config.update(**snake_case ) return config def SCREAMING_SNAKE_CASE__ ( self ): for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=snake_case ) def SCREAMING_SNAKE_CASE__ ( self ): for variance in ["fixed_small_log", "learned_range"]: self.check_over_configs(variance_type=snake_case ) def SCREAMING_SNAKE_CASE__ ( self ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=snake_case ) def SCREAMING_SNAKE_CASE__ ( self ): for clip_sample_range in [1, 5, 10, 20]: self.check_over_configs(clip_sample_range=snake_case ) def SCREAMING_SNAKE_CASE__ ( self ): for prediction_type in ["epsilon", "sample"]: self.check_over_configs(prediction_type=snake_case ) def SCREAMING_SNAKE_CASE__ ( self ): for time_step in [0, 500, 999]: for prev_timestep in [None, 5, 100, 250, 500, 750]: if prev_timestep is not None and prev_timestep >= time_step: continue self.check_over_forward(time_step=snake_case , prev_timestep=snake_case ) def SCREAMING_SNAKE_CASE__ ( self ): lowercase = self.scheduler_classes[0] lowercase = self.get_scheduler_config(variance_type='fixed_small_log' ) lowercase = scheduler_class(**snake_case ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_000E-10 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1E-5 def SCREAMING_SNAKE_CASE__ ( self ): lowercase = self.scheduler_classes[0] lowercase = self.get_scheduler_config(variance_type='learned_range' ) lowercase = 
scheduler_class(**snake_case ) lowercase = 0.5 assert scheduler._get_variance(1 , predicted_variance=snake_case ) - -10.1_712_790 < 1E-5 assert scheduler._get_variance(487 , predicted_variance=snake_case ) - -5.7_998_052 < 1E-5 assert scheduler._get_variance(999 , predicted_variance=snake_case ) - -0.0_010_011 < 1E-5 def SCREAMING_SNAKE_CASE__ ( self ): lowercase = self.scheduler_classes[0] lowercase = self.get_scheduler_config() lowercase = scheduler_class(**snake_case ) lowercase = scheduler.timesteps lowercase = self.dummy_model() lowercase = self.dummy_sample_deter lowercase = torch.manual_seed(0 ) for i, t in enumerate(snake_case ): # 1. predict noise residual lowercase = model(snake_case , snake_case ) # 2. predict previous mean of sample x_t-1 lowercase = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case ).prev_sample lowercase = pred_prev_sample lowercase = torch.sum(torch.abs(snake_case ) ) lowercase = torch.mean(torch.abs(snake_case ) ) assert abs(result_sum.item() - 252.2_682_495 ) < 1E-2 assert abs(result_mean.item() - 0.3_284_743 ) < 1E-3 def SCREAMING_SNAKE_CASE__ ( self ): lowercase = self.scheduler_classes[0] lowercase = self.get_scheduler_config() lowercase = scheduler_class(**snake_case ) scheduler.set_timesteps(25 ) lowercase = scheduler.timesteps lowercase = self.dummy_model() lowercase = self.dummy_sample_deter lowercase = torch.manual_seed(0 ) for i, t in enumerate(snake_case ): # 1. predict noise residual lowercase = model(snake_case , snake_case ) if i + 1 == timesteps.shape[0]: lowercase = None else: lowercase = timesteps[i + 1] # 2. 
predict previous mean of sample x_t-1 lowercase = scheduler.step( snake_case , snake_case , snake_case , prev_timestep=snake_case , generator=snake_case ).prev_sample lowercase = pred_prev_sample lowercase = torch.sum(torch.abs(snake_case ) ) lowercase = torch.mean(torch.abs(snake_case ) ) assert abs(result_sum.item() - 258.2_044_983 ) < 1E-2 assert abs(result_mean.item() - 0.3_362_038 ) < 1E-3 def SCREAMING_SNAKE_CASE__ ( self ): pass def SCREAMING_SNAKE_CASE__ ( self ): pass
565
1
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    """Parse the launcher's own arguments plus the wrapped script's argv.

    Fix: the mangled source defined both helpers under a single repeated
    name (so this one was shadowed and unreachable from the entry point)
    and replaced the ``type``/``nargs`` values with an undefined name;
    ``REMAINDER`` was already imported for exactly this use.
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def UpperCamelCase():
    """Import the target training script as a module and spawn it on TPU cores."""
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv so the wrapped script sees its own arguments.
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


# Restore the public name the __main__ guard calls (undefined after mangling).
main = UpperCamelCase

if __name__ == "__main__":
    main()
40
import os
import textwrap

import pyarrow as pa
import pytest

from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv

from ..utils import require_pil


@pytest.fixture
def csv_file(tmp_path):
    """A well-formed two-column CSV file."""
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    """A CSV file with a trailing comma that makes pandas' tokenizer fail."""
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    """A single-column CSV whose one row is a path to an image file."""
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    """A single-column CSV of string class labels."""
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    """A single-column CSV of space-separated integer lists."""
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    # The builder should log which file failed before re-raising.
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    # Image()() yields the Arrow storage type of the Image feature.
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    # str2int maps the string labels read from the file to their class ids.
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
40
1
"""simple docstring""" from collections.abc import Sequence from queue import Queue class A__ : """simple docstring""" def __init__( self: int , __a: List[str] , __a: Union[str, Any] , __a: Union[str, Any] , __a: Tuple=None , __a: str=None )-> Union[str, Any]: lowerCamelCase : Any = start lowerCamelCase : Union[str, Any] = end lowerCamelCase : List[Any] = val lowerCamelCase : Optional[int] = (start + end) // 2 lowerCamelCase : Dict = left lowerCamelCase : str = right def __repr__( self: List[Any] )-> Optional[Any]: return f'SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})' class A__ : """simple docstring""" def __init__( self: int , __a: Sequence , __a: Optional[Any] )-> Any: lowerCamelCase : Optional[int] = collection lowerCamelCase : str = function if self.collection: lowerCamelCase : Optional[Any] = self._build_tree(0 , len(__a ) - 1 ) def a__ ( self: List[Any] , __a: Tuple , __a: Union[str, Any] )-> Any: self._update_tree(self.root , __a , __a ) def a__ ( self: Any , __a: List[str] , __a: Optional[int] )-> Union[str, Any]: return self._query_range(self.root , __a , __a ) def a__ ( self: List[Any] , __a: str , __a: Dict )-> Dict: if start == end: return SegmentTreeNode(__a , __a , self.collection[start] ) lowerCamelCase : int = (start + end) // 2 lowerCamelCase : Tuple = self._build_tree(__a , __a ) lowerCamelCase : Union[str, Any] = self._build_tree(mid + 1 , __a ) return SegmentTreeNode(__a , __a , self.fn(left.val , right.val ) , __a , __a ) def a__ ( self: Union[str, Any] , __a: Dict , __a: str , __a: Optional[Any] )-> Any: if node.start == i and node.end == i: lowerCamelCase : Optional[Any] = val return if i <= node.mid: self._update_tree(node.left , __a , __a ) else: self._update_tree(node.right , __a , __a ) lowerCamelCase : Optional[int] = self.fn(node.left.val , node.right.val ) def a__ ( self: Tuple , __a: Dict , __a: Optional[Any] , __a: int )-> Optional[int]: if node.start == i and node.end == j: return node.val if i <= node.mid: 
if j <= node.mid: # range in left child tree return self._query_range(node.left , __a , __a ) else: # range in left child tree and right child tree return self.fn( self._query_range(node.left , __a , node.mid ) , self._query_range(node.right , node.mid + 1 , __a ) , ) else: # range in right child tree return self._query_range(node.right , __a , __a ) def a__ ( self: List[Any] )-> Any: if self.root is not None: lowerCamelCase : Union[str, Any] = Queue() queue.put(self.root ) while not queue.empty(): lowerCamelCase : List[Any] = queue.get() yield node if node.left is not None: queue.put(node.left ) if node.right is not None: queue.put(node.right ) if __name__ == "__main__": import operator for fn in [operator.add, max, min]: print('*' * 50) __lowerCamelCase :int = SegmentTree([2, 1, 5, 3, 4], fn) for node in arr.traverse(): print(node) print() arr.update(1, 5) for node in arr.traverse(): print(node) print() print(arr.query_range(3, 4)) # 7 print(arr.query_range(2, 2)) # 5 print(arr.query_range(1, 3)) # 13 print()
42
"""simple docstring""" import inspect import unittest from transformers import ConvNextConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A__ : """simple docstring""" def __init__( self: Optional[Any] , __a: Union[str, Any] , __a: Optional[Any]=13 , __a: Optional[Any]=32 , __a: Dict=3 , __a: int=4 , __a: Dict=[10, 20, 30, 40] , __a: int=[2, 2, 3, 2] , __a: Any=True , __a: List[Any]=True , __a: Any=37 , __a: Optional[int]="gelu" , __a: List[str]=10 , __a: Optional[int]=0.02 , __a: Dict=["stage2", "stage3", "stage4"] , __a: List[str]=[2, 3, 4] , __a: List[str]=None , )-> Union[str, Any]: lowerCamelCase : Optional[int] = parent lowerCamelCase : Optional[int] = batch_size lowerCamelCase : Any = image_size lowerCamelCase : Tuple = num_channels lowerCamelCase : str = num_stages lowerCamelCase : List[str] = hidden_sizes lowerCamelCase : str = depths lowerCamelCase : Dict = is_training lowerCamelCase : Optional[Any] = use_labels lowerCamelCase : List[str] = intermediate_size lowerCamelCase : List[str] = hidden_act lowerCamelCase : List[str] = num_labels lowerCamelCase : Union[str, Any] = initializer_range lowerCamelCase : List[Any] = out_features lowerCamelCase : Optional[Any] = out_indices lowerCamelCase : int = scope def a__ ( self: str )-> Optional[Any]: lowerCamelCase : Union[str, 
Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase : Dict = None if self.use_labels: lowerCamelCase : Dict = ids_tensor([self.batch_size] , self.num_labels ) lowerCamelCase : Any = self.get_config() return config, pixel_values, labels def a__ ( self: Dict )-> Union[str, Any]: return ConvNextConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__a , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def a__ ( self: Optional[Any] , __a: List[Any] , __a: Any , __a: int )-> List[Any]: lowerCamelCase : Optional[int] = ConvNextModel(config=__a ) model.to(__a ) model.eval() lowerCamelCase : Any = model(__a ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def a__ ( self: int , __a: Union[str, Any] , __a: List[Any] , __a: Tuple )-> Optional[int]: lowerCamelCase : str = ConvNextForImageClassification(__a ) model.to(__a ) model.eval() lowerCamelCase : Any = model(__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a__ ( self: List[Any] , __a: Any , __a: Optional[int] , __a: Tuple )-> List[str]: lowerCamelCase : List[str] = ConvNextBackbone(config=__a ) model.to(__a ) model.eval() lowerCamelCase : int = model(__a ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with 
out_features=None lowerCamelCase : Tuple = None lowerCamelCase : List[str] = ConvNextBackbone(config=__a ) model.to(__a ) model.eval() lowerCamelCase : List[Any] = model(__a ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def a__ ( self: Optional[Any] )-> Any: lowerCamelCase : List[Any] = self.prepare_config_and_inputs() lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = config_and_inputs lowerCamelCase : int = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class A__ ( __lowercase , __lowercase , unittest.TestCase): """simple docstring""" snake_case__ : int =( ( ConvNextModel, ConvNextForImageClassification, ConvNextBackbone, ) if is_torch_available() else () ) snake_case__ : str =( {'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification} if is_torch_available() else {} ) snake_case__ : Union[str, Any] =True snake_case__ : Optional[int] =False snake_case__ : Tuple =False snake_case__ : Union[str, Any] =False snake_case__ : Tuple =False def a__ ( self: Optional[Any] )-> Union[str, Any]: lowerCamelCase : Tuple = ConvNextModelTester(self ) lowerCamelCase : List[Any] = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 ) def a__ ( self: Optional[int] )-> Dict: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def a__ ( self: 
Optional[int] )-> Optional[Any]: return @unittest.skip(reason="""ConvNext does not use inputs_embeds""" ) def a__ ( self: int )-> Dict: pass @unittest.skip(reason="""ConvNext does not support input and output embeddings""" ) def a__ ( self: Dict )-> Optional[Any]: pass @unittest.skip(reason="""ConvNext does not use feedforward chunking""" ) def a__ ( self: int )-> List[Any]: pass def a__ ( self: Union[str, Any] )-> int: lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase : Any = model_class(__a ) lowerCamelCase : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase : Optional[Any] = [*signature.parameters.keys()] lowerCamelCase : List[str] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __a ) def a__ ( self: Optional[int] )-> str: lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def a__ ( self: str )-> int: lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__a ) def a__ ( self: int )-> Optional[int]: def check_hidden_states_output(__a: Tuple , __a: int , __a: Tuple ): lowerCamelCase : str = model_class(__a ) model.to(__a ) model.eval() with torch.no_grad(): lowerCamelCase : Tuple = model(**self._prepare_for_class(__a , __a ) ) lowerCamelCase : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCamelCase : Optional[int] = self.model_tester.num_stages self.assertEqual(len(__a ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowerCamelCase , lowerCamelCase : List[Any] = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase : List[Any] = True check_hidden_states_output(__a , __a , __a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase : Tuple = True check_hidden_states_output(__a , __a , __a ) def a__ ( self: Dict )-> Optional[Any]: lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a ) @slow def a__ ( self: Optional[Any] )-> Tuple: for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase : str = ConvNextModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def snake_case ( ) -> Optional[int]: lowerCamelCase : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class A__ ( unittest.TestCase): """simple docstring""" @cached_property def a__ ( self: Dict )-> Union[str, Any]: return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None @slow def a__ ( self: List[str] )-> Dict: lowerCamelCase : Tuple = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(__a ) lowerCamelCase : Dict = self.default_image_processor lowerCamelCase : Union[str, Any] = prepare_img() lowerCamelCase : Optional[Any] = image_processor(images=__a , return_tensors="""pt""" ).to(__a ) # forward pass with torch.no_grad(): lowerCamelCase : Any = model(**__a ) # verify the logits lowerCamelCase : Union[str, Any] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , __a ) lowerCamelCase : Tuple = torch.tensor([-0.02_60, -0.47_39, 0.19_11] ).to(__a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) ) @require_torch class A__ ( unittest.TestCase , __lowercase): """simple docstring""" snake_case__ : Union[str, Any] =(ConvNextBackbone,) if is_torch_available() 
else () snake_case__ : Optional[Any] =ConvNextConfig snake_case__ : Optional[Any] =False def a__ ( self: List[str] )-> int: lowerCamelCase : Dict = ConvNextModelTester(self )
42
1
# NOTE(review): Protein/PDB utility module (numpy-based, openfold-style
# `protein.py` by the look of its ProteinNet parsing and PDB emission), with
# machine-mangled identifiers: the dataclass is named `_lowerCamelCase` although
# later code constructs `Protein(...)`, and every top-level function is named
# `lowerCAmelCase__`, so each definition shadows the previous one and internal
# calls such as `get_pdb_headers(...)` are unresolved. Local bindings are all
# `__snake_case : <type> = ...` while later statements reference the original
# names (pdb_lines, parents, remark, ...), which makes the original bindings
# ambiguous; the code is therefore left byte-identical rather than restyled.
# TODO: restore from the pristine upstream source.
import dataclasses import re import string from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple import numpy as np from . import residue_constants _UpperCamelCase = Mapping[str, np.ndarray] _UpperCamelCase = Mapping[str, Any] # Is a nested dict. _UpperCamelCase = 0.01 @dataclasses.dataclass(frozen=a ) class _lowerCamelCase : """simple docstring""" UpperCAmelCase_ : np.ndarray # [num_res, num_atom_type, 3] # Amino-acid type for each residue represented as an integer between 0 and # 20, where 20 is 'X'. UpperCAmelCase_ : np.ndarray # [num_res] # Binary float mask to indicate presence of a particular atom. 1.0 if an atom # is present and 0.0 if not. This should be used for loss masking. UpperCAmelCase_ : np.ndarray # [num_res, num_atom_type] # Residue index as used in PDB. It is not necessarily continuous or 0-indexed. UpperCAmelCase_ : np.ndarray # [num_res] # B-factors, or temperature factors, of each residue (in sq. angstroms units), # representing the displacement of the residue from its ground truth mean # value. UpperCAmelCase_ : np.ndarray # [num_res, num_atom_type] # Chain indices for multi-chain predictions UpperCAmelCase_ : Optional[np.ndarray] =None # Optional remark about the protein. 
Included as a comment in output PDB # files UpperCAmelCase_ : Optional[str] =None # Templates used to generate this protein (prediction-only) UpperCAmelCase_ : Optional[Sequence[str]] =None # Chain corresponding to each parent UpperCAmelCase_ : Optional[Sequence[int]] =None def lowerCAmelCase__( lowercase : str ) -> Protein: __snake_case : Dict = R"(\[[A-Z]+\]\n)" __snake_case : List[str] = [tag.strip() for tag in re.split(lowercase , lowercase ) if len(lowercase ) > 0] __snake_case : Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split("\n" ) for l in tags[1::2]] ) __snake_case : List[str] = ["N", "CA", "C"] __snake_case : List[Any] = None __snake_case : Dict = None __snake_case : str = None for g in groups: if "[PRIMARY]" == g[0]: __snake_case : str = g[1][0].strip() for i in range(len(lowercase ) ): if seq[i] not in residue_constants.restypes: __snake_case : int = "X" # FIXME: strings are immutable __snake_case : Tuple = np.array( [residue_constants.restype_order.get(lowercase , residue_constants.restype_num ) for res_symbol in seq] ) elif "[TERTIARY]" == g[0]: __snake_case : List[List[float]] = [] for axis in range(3 ): tertiary.append(list(map(lowercase , g[1][axis].split() ) ) ) __snake_case : Optional[Any] = np.array(lowercase ) __snake_case : Optional[int] = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa ) for i, atom in enumerate(lowercase ): __snake_case : List[str] = np.transpose(tertiary_np[:, i::3] ) atom_positions *= PICO_TO_ANGSTROM elif "[MASK]" == g[0]: __snake_case : Dict = np.array(list(map({"-": 0, "+": 1}.get , g[1][0].strip() ) ) ) __snake_case : Dict = np.zeros( ( len(lowercase ), residue_constants.atom_type_num, ) ).astype(np.floataa ) for i, atom in enumerate(lowercase ): __snake_case : Tuple = 1 atom_mask *= mask[..., None] assert aatype is not None return Protein( atom_positions=lowercase , atom_mask=lowercase , aatype=lowercase , residue_index=np.arange(len(lowercase ) ) , 
b_factors=lowercase , ) def lowerCAmelCase__( lowercase : Protein , lowercase : int = 0 ) -> List[str]: __snake_case : List[str] = [] __snake_case : str = prot.remark if remark is not None: pdb_headers.append(f"""REMARK {remark}""" ) __snake_case : Optional[Any] = prot.parents __snake_case : str = prot.parents_chain_index if parents is not None and parents_chain_index is not None: __snake_case : Union[str, Any] = [p for i, p in zip(lowercase , lowercase ) if i == chain_id] if parents is None or len(lowercase ) == 0: __snake_case : Union[str, Any] = ["N/A"] pdb_headers.append(f"""PARENT {" ".join(lowercase )}""" ) return pdb_headers def lowerCAmelCase__( lowercase : Protein , lowercase : str ) -> str: __snake_case : List[str] = [] __snake_case : str = pdb_str.split("\n" ) __snake_case : List[str] = prot.remark if remark is not None: out_pdb_lines.append(f"""REMARK {remark}""" ) __snake_case : List[List[str]] if prot.parents is not None and len(prot.parents ) > 0: __snake_case : List[Any] = [] if prot.parents_chain_index is not None: __snake_case : Dict[str, List[str]] = {} for p, i in zip(prot.parents , prot.parents_chain_index ): parent_dict.setdefault(str(lowercase ) , [] ) parent_dict[str(lowercase )].append(lowercase ) __snake_case : str = max([int(lowercase ) for chain_idx in parent_dict] ) for i in range(max_idx + 1 ): __snake_case : Union[str, Any] = parent_dict.get(str(lowercase ) , ["N/A"] ) parents_per_chain.append(lowercase ) else: parents_per_chain.append(list(prot.parents ) ) else: __snake_case : List[str] = [["N/A"]] def make_parent_line(lowercase : Sequence[str] ) -> str: return f"""PARENT {" ".join(lowercase )}""" out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) ) __snake_case : Dict = 0 for i, l in enumerate(lowercase ): if "PARENT" not in l and "REMARK" not in l: out_pdb_lines.append(lowercase ) if "TER" in l and "END" not in lines[i + 1]: chain_counter += 1 if not chain_counter >= len(lowercase ): __snake_case : List[Any] = 
parents_per_chain[chain_counter] else: __snake_case : int = ["N/A"] out_pdb_lines.append(make_parent_line(lowercase ) ) return "\n".join(lowercase ) def lowerCAmelCase__( lowercase : Protein ) -> str: __snake_case : Union[str, Any] = residue_constants.restypes + ["X"] def res_atoa(lowercase : int ) -> str: return residue_constants.restype_atoa.get(restypes[r] , "UNK" ) __snake_case : List[str] = residue_constants.atom_types __snake_case : List[str] = [] __snake_case : int = prot.atom_mask __snake_case : int = prot.aatype __snake_case : List[Any] = prot.atom_positions __snake_case : str = prot.residue_index.astype(np.intaa ) __snake_case : Optional[int] = prot.b_factors __snake_case : List[str] = prot.chain_index if np.any(aatype > residue_constants.restype_num ): raise ValueError("Invalid aatypes." ) __snake_case : Optional[int] = get_pdb_headers(lowercase ) if len(lowercase ) > 0: pdb_lines.extend(lowercase ) __snake_case : str = aatype.shape[0] __snake_case : int = 1 __snake_case : str = 0 __snake_case : List[Any] = string.ascii_uppercase __snake_case : Tuple = None # Add all atom sites. for i in range(lowercase ): __snake_case : List[Any] = res_atoa(aatype[i] ) for atom_name, pos, mask, b_factor in zip(lowercase , atom_positions[i] , atom_mask[i] , b_factors[i] ): if mask < 0.5: continue __snake_case : Dict = "ATOM" __snake_case : str = atom_name if len(lowercase ) == 4 else f""" {atom_name}""" __snake_case : Optional[Any] = "" __snake_case : Union[str, Any] = "" __snake_case : str = 1.0_0 __snake_case : str = atom_name[0] # Protein supports only C, N, O, S, this works. __snake_case : Optional[int] = "" __snake_case : List[str] = "A" if chain_index is not None: __snake_case : Optional[Any] = chain_tags[chain_index[i]] # PDB is a columnar format, every space matters here! 
__snake_case : List[str] = ( f"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}""" f"""{res_name_a:>3} {chain_tag:>1}""" f"""{residue_index[i]:>4}{insertion_code:>1} """ f"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}""" f"""{occupancy:>6.2f}{b_factor:>6.2f} """ f"""{element:>2}{charge:>2}""" ) pdb_lines.append(lowercase ) atom_index += 1 __snake_case : List[str] = i == n - 1 if chain_index is not None: if i != n - 1 and chain_index[i + 1] != prev_chain_index: __snake_case : Tuple = True __snake_case : Dict = chain_index[i + 1] if should_terminate: # Close the chain. __snake_case : str = "TER" __snake_case : str = ( f"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}""" ) pdb_lines.append(lowercase ) atom_index += 1 if i != n - 1: # "prev" is a misnomer here. This happens at the beginning of # each new chain. pdb_lines.extend(get_pdb_headers(lowercase , lowercase ) ) pdb_lines.append("END" ) pdb_lines.append("" ) return "\n".join(lowercase ) def lowerCAmelCase__( lowercase : Protein ) -> np.ndarray: return residue_constants.STANDARD_ATOM_MASK[prot.aatype] def lowerCAmelCase__( lowercase : FeatureDict , lowercase : ModelOutput , lowercase : Optional[np.ndarray] = None , lowercase : Optional[np.ndarray] = None , lowercase : Optional[str] = None , lowercase : Optional[Sequence[str]] = None , lowercase : Optional[Sequence[int]] = None , ) -> Protein: return Protein( aatype=features["aatype"] , atom_positions=result["final_atom_positions"] , atom_mask=result["final_atom_mask"] , residue_index=features["residue_index"] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"] ) , chain_index=lowercase , remark=lowercase , parents=lowercase , parents_chain_index=lowercase , )
243
from sklearn.metrics import mean_squared_error

import datasets


_CITATION = """\
@article{scikit-learn,
  title={Scikit-learn: Machine Learning in {P}ython},
  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
  journal={Journal of Machine Learning Research},
  volume={12},
  pages={2825--2830},
  year={2011}
}
"""

_DESCRIPTION = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.
    references: array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.
    sample_weight: array-like of shape (n_samples,), default=None
        Sample weights.
    multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
        Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
        "raw_values" : Returns a full set of errors in case of multioutput input.
        "uniform_average" : Errors of all outputs are averaged with uniform weight.
    squared : bool, default=True
        If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
    mse : mean squared error.
Examples:

    >>> mse_metric = datasets.load_metric("mse")
    >>> predictions = [2.5, 0.0, 2, 8]
    >>> references = [3, -0.5, 2, 7]
    >>> results = mse_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'mse': 0.375}
    >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
    >>> print(rmse_result)
    {'mse': 0.6123724356957945}

    If you're using multi-dimensional lists, then set the config as follows :

    >>> mse_metric = datasets.load_metric("mse", "multilist")
    >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
    >>> references = [[0, 2], [-1, 2], [8, -5]]
    >>> results = mse_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'mse': 0.7083333333333334}
    >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
    >>> print(results) # doctest: +NORMALIZE_WHITESPACE
    {'mse': array([0.41666667, 1.        ])}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
    """Mean-squared-error metric wrapping sklearn.metrics.mean_squared_error."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        # The "multilist" config accepts (n_samples, n_outputs) shaped inputs;
        # the default config accepts flat (n_samples,) inputs.
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        """Return {"mse": ...}; RMSE instead when ``squared=False``."""
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
243
1
"""simple docstring""" import numpy as np def lowercase__ ( lowerCAmelCase__ : np.array ) -> np.array: '''simple docstring''' return (2 / (1 + np.exp(-2 * vector ))) - 1 if __name__ == "__main__": import doctest doctest.testmod()
251
"""simple docstring""" from __future__ import annotations import math def lowercase__ ( lowerCAmelCase__ : float , lowerCAmelCase__ : int ) -> float: '''simple docstring''' a__ : Optional[Any] = u for i in range(1 , lowerCAmelCase__ ): a__ : Any = temp * (u - i) return temp def lowercase__ ( ) -> None: '''simple docstring''' a__ : List[str] = int(input("enter the numbers of values: " ) ) a__ : list[list[float]] = [] for _ in range(lowerCAmelCase__ ): y.append([] ) for i in range(lowerCAmelCase__ ): for j in range(lowerCAmelCase__ ): y[i].append(lowerCAmelCase__ ) a__ : Union[str, Any] = 0 print("enter the values of parameters in a list: " ) a__ : Optional[Any] = list(map(lowerCAmelCase__ , input().split() ) ) print("enter the values of corresponding parameters: " ) for i in range(lowerCAmelCase__ ): a__ : Optional[Any] = float(input() ) a__ : List[str] = int(input("enter the value to interpolate: " ) ) a__ : Dict = (value - x[0]) / (x[1] - x[0]) # for calculating forward difference table for i in range(1 , lowerCAmelCase__ ): for j in range(n - i ): a__ : Optional[Any] = y[j + 1][i - 1] - y[j][i - 1] a__ : Optional[Any] = y[0][0] for i in range(1 , lowerCAmelCase__ ): summ += (ucal(lowerCAmelCase__ , lowerCAmelCase__ ) * y[0][i]) / math.factorial(lowerCAmelCase__ ) print(F"the value at {value} is {summ}" ) if __name__ == "__main__": main()
251
1
import unittest from transformers.testing_utils import CaptureStdout from transformers.tools.python_interpreter import evaluate def _lowercase ( SCREAMING_SNAKE_CASE_ : List[Any] ): """simple docstring""" return x + 2 class UpperCAmelCase ( unittest.TestCase ): def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase = """x = 3""" UpperCamelCase = {} UpperCamelCase = evaluate(__magic_name__ , {} , state=__magic_name__ ) assert result == 3 self.assertDictEqual(__magic_name__ , {"""x""": 3} ) UpperCamelCase = """x = y""" UpperCamelCase = {"""y""": 5} UpperCamelCase = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 5, """y""": 5} ) def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" UpperCamelCase = """y = add_two(x)""" UpperCamelCase = {"""x""": 3} UpperCamelCase = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ ) assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 5} ) # Won't work without the tool with CaptureStdout() as out: UpperCamelCase = evaluate(__magic_name__ , {} , state=__magic_name__ ) assert result is None assert "tried to execute add_two" in out.out def lowerCamelCase_ ( self : List[str] ): """simple docstring""" UpperCamelCase = """x = 3""" UpperCamelCase = {} UpperCamelCase = evaluate(__magic_name__ , {} , state=__magic_name__ ) assert result == 3 self.assertDictEqual(__magic_name__ , {"""x""": 3} ) def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" UpperCamelCase = """test_dict = {'x': x, 'y': add_two(x)}""" UpperCamelCase = {"""x""": 3} UpperCamelCase = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ ) self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 5} ) self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} ) def lowerCamelCase_ ( 
self : Optional[Any] ): """simple docstring""" UpperCamelCase = """x = 3\ny = 5""" UpperCamelCase = {} UpperCamelCase = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 5} ) def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase = """text = f'This is x: {x}.'""" UpperCamelCase = {"""x""": 3} UpperCamelCase = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. assert result == "This is x: 3." self.assertDictEqual(__magic_name__ , {"""x""": 3, """text""": """This is x: 3."""} ) def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase = """if x <= 3:\n y = 2\nelse:\n y = 5""" UpperCamelCase = {"""x""": 3} UpperCamelCase = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. assert result == 2 self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 2} ) UpperCamelCase = {"""x""": 8} UpperCamelCase = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. 
assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 8, """y""": 5} ) def lowerCamelCase_ ( self : str ): """simple docstring""" UpperCamelCase = """test_list = [x, add_two(x)]""" UpperCamelCase = {"""x""": 3} UpperCamelCase = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ ) self.assertListEqual(__magic_name__ , [3, 5] ) self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_list""": [3, 5]} ) def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase = """y = x""" UpperCamelCase = {"""x""": 3} UpperCamelCase = evaluate(__magic_name__ , {} , state=__magic_name__ ) assert result == 3 self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 3} ) def lowerCamelCase_ ( self : Optional[Any] ): """simple docstring""" UpperCamelCase = """test_list = [x, add_two(x)]\ntest_list[1]""" UpperCamelCase = {"""x""": 3} UpperCamelCase = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ ) assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_list""": [3, 5]} ) UpperCamelCase = """test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']""" UpperCamelCase = {"""x""": 3} UpperCamelCase = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ ) assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} ) def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase = """x = 0\nfor i in range(3):\n x = i""" UpperCamelCase = {} UpperCamelCase = evaluate(__magic_name__ , {"""range""": range} , state=__magic_name__ ) assert result == 2 self.assertDictEqual(__magic_name__ , {"""x""": 2, """i""": 2} )
386
import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL __snake_case = logging.get_logger(__name__) def _lowercase ( SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Union[int, Iterable[int]] , SCREAMING_SNAKE_CASE_ : bool , SCREAMING_SNAKE_CASE_ : int ): """simple docstring""" def constraint_to_multiple_of(SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int=0 , SCREAMING_SNAKE_CASE_ : List[Any]=None ): UpperCamelCase = round(val / multiple ) * multiple if max_val is not None and x > max_val: UpperCamelCase = math.floor(val / multiple ) * multiple if x < min_val: UpperCamelCase = math.ceil(val / multiple ) * multiple return x UpperCamelCase = (output_size, output_size) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else output_size UpperCamelCase , UpperCamelCase = get_image_size(SCREAMING_SNAKE_CASE_ ) UpperCamelCase , UpperCamelCase = output_size # determine new height and width UpperCamelCase = output_height / input_height UpperCamelCase = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width UpperCamelCase = scale_width else: # fit height UpperCamelCase = scale_height UpperCamelCase = constraint_to_multiple_of(scale_height * input_height , multiple=SCREAMING_SNAKE_CASE_ ) UpperCamelCase = constraint_to_multiple_of(scale_width * 
input_width , multiple=SCREAMING_SNAKE_CASE_ ) return (new_height, new_width) class UpperCAmelCase ( __snake_case ): lowercase = ["""pixel_values"""] def __init__( self : List[Any] , __magic_name__ : bool = True , __magic_name__ : Dict[str, int] = None , __magic_name__ : PILImageResampling = PILImageResampling.BILINEAR , __magic_name__ : bool = False , __magic_name__ : int = 1 , __magic_name__ : bool = True , __magic_name__ : Union[int, float] = 1 / 2_5_5 , __magic_name__ : bool = True , __magic_name__ : Optional[Union[float, List[float]]] = None , __magic_name__ : Optional[Union[float, List[float]]] = None , **__magic_name__ : Tuple , ): """simple docstring""" super().__init__(**__magic_name__ ) UpperCamelCase = size if size is not None else {"""height""": 3_8_4, """width""": 3_8_4} UpperCamelCase = get_size_dict(__magic_name__ ) UpperCamelCase = do_resize UpperCamelCase = size UpperCamelCase = keep_aspect_ratio UpperCamelCase = ensure_multiple_of UpperCamelCase = resample UpperCamelCase = do_rescale UpperCamelCase = rescale_factor UpperCamelCase = do_normalize UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowerCamelCase_ ( self : List[Any] , __magic_name__ : np.ndarray , __magic_name__ : Dict[str, int] , __magic_name__ : bool = False , __magic_name__ : int = 1 , __magic_name__ : PILImageResampling = PILImageResampling.BICUBIC , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : int , ): """simple docstring""" UpperCamelCase = get_size_dict(__magic_name__ ) if "height" not in size or "width" not in size: raise ValueError(F'The size dictionary must contain the keys \'height\' and \'width\'. 
Got {size.keys()}' ) UpperCamelCase = get_resize_output_image_size( __magic_name__ , output_size=(size["""height"""], size["""width"""]) , keep_aspect_ratio=__magic_name__ , multiple=__magic_name__ , ) return resize(__magic_name__ , size=__magic_name__ , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ ) def lowerCamelCase_ ( self : Any , __magic_name__ : np.ndarray , __magic_name__ : Union[int, float] , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : str , ): """simple docstring""" return rescale(__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ ) def lowerCamelCase_ ( self : int , __magic_name__ : np.ndarray , __magic_name__ : Union[float, List[float]] , __magic_name__ : Union[float, List[float]] , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : Dict , ): """simple docstring""" return normalize(__magic_name__ , mean=__magic_name__ , std=__magic_name__ , data_format=__magic_name__ , **__magic_name__ ) def lowerCamelCase_ ( self : List[str] , __magic_name__ : ImageInput , __magic_name__ : bool = None , __magic_name__ : int = None , __magic_name__ : bool = None , __magic_name__ : int = None , __magic_name__ : PILImageResampling = None , __magic_name__ : bool = None , __magic_name__ : float = None , __magic_name__ : bool = None , __magic_name__ : Optional[Union[float, List[float]]] = None , __magic_name__ : Optional[Union[float, List[float]]] = None , __magic_name__ : Optional[Union[str, TensorType]] = None , __magic_name__ : ChannelDimension = ChannelDimension.FIRST , **__magic_name__ : int , ): """simple docstring""" UpperCamelCase = do_resize if do_resize is not None else self.do_resize UpperCamelCase = size if size is not None else self.size UpperCamelCase = get_size_dict(__magic_name__ ) UpperCamelCase = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio UpperCamelCase = ensure_multiple_of if 
ensure_multiple_of is not None else self.ensure_multiple_of UpperCamelCase = resample if resample is not None else self.resample UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize UpperCamelCase = image_mean if image_mean is not None else self.image_mean UpperCamelCase = image_std if image_std is not None else self.image_std UpperCamelCase = make_list_of_images(__magic_name__ ) if not valid_images(__magic_name__ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. 
UpperCamelCase = [to_numpy_array(__magic_name__ ) for image in images] if do_resize: UpperCamelCase = [self.resize(image=__magic_name__ , size=__magic_name__ , resample=__magic_name__ ) for image in images] if do_rescale: UpperCamelCase = [self.rescale(image=__magic_name__ , scale=__magic_name__ ) for image in images] if do_normalize: UpperCamelCase = [self.normalize(image=__magic_name__ , mean=__magic_name__ , std=__magic_name__ ) for image in images] UpperCamelCase = [to_channel_dimension_format(__magic_name__ , __magic_name__ ) for image in images] UpperCamelCase = {"""pixel_values""": images} return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ ) def lowerCamelCase_ ( self : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : List[Tuple] = None ): """simple docstring""" UpperCamelCase = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(__magic_name__ ) != len(__magic_name__ ): raise ValueError( """Make sure that you pass in as many target sizes as the batch dimension of the logits""" ) if is_torch_tensor(__magic_name__ ): UpperCamelCase = target_sizes.numpy() UpperCamelCase = [] for idx in range(len(__magic_name__ ) ): UpperCamelCase = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=__magic_name__ ) UpperCamelCase = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(__magic_name__ ) else: UpperCamelCase = logits.argmax(dim=1 ) UpperCamelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
386
1
"""Project Euler problem 8: greatest product of thirteen adjacent digits.

https://projecteuler.net/problem=8
"""
import sys

# The 1000-digit number from the problem statement, stored as a string so
# individual digits can be sliced and converted.
N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Return the greatest product of 13 adjacent digits of ``n``.

    Bug fix: the original defined the constant, parameter, and loop
    variables under inconsistent mangled names (``N``, ``UpperCamelCase__``
    and ``n`` were all undefined), so importing the module raised
    ``NameError``.  Names are restored to match the call sites.

    >>> solution("1111111111111")
    1
    """
    largest_product = -sys.maxsize - 1
    # Slide a 13-digit window over the string; range stops so the last
    # window i..i+12 still fits inside n.
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
720
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, Pipeline, ZeroShotClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. __lowerCamelCase : List[str] = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''} @is_pipeline_test class A_ (unittest.TestCase ): """simple docstring""" a__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING a__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: a__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: a__ = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } def _A ( self :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Dict ) -> Any: '''simple docstring''' snake_case_ : Optional[Any] = ZeroShotClassificationPipeline( model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , candidate_labels=["polics", "health"] ) return classifier, ["Who are you voting for in 2020?", "My stomach hurts."] def _A ( self :List[str] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Any ) -> Optional[Any]: '''simple docstring''' snake_case_ : Tuple = classifier("Who are you voting for in 2020?" , candidate_labels="politics" ) self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} ) # No kwarg snake_case_ : List[Any] = classifier("Who are you voting for in 2020?" 
, ["politics"] ) self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} ) snake_case_ : Dict = classifier("Who are you voting for in 2020?" , candidate_labels=["politics"] ) self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} ) snake_case_ : int = classifier("Who are you voting for in 2020?" , candidate_labels="politics, public health" ) self.assertEqual( lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 ) snake_case_ : Optional[int] = classifier("Who are you voting for in 2020?" , candidate_labels=["politics", "public health"] ) self.assertEqual( lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 ) snake_case_ : str = classifier( "Who are you voting for in 2020?" 
, candidate_labels="politics" , hypothesis_template="This text is about {}" ) self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} ) # https://github.com/huggingface/transformers/issues/13846 snake_case_ : Dict = classifier(["I am happy"] , ["positive", "negative"] ) self.assertEqual( lowerCAmelCase__ , [ {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]} for i in range(1 ) ] , ) snake_case_ : Tuple = classifier(["I am happy", "I am sad"] , ["positive", "negative"] ) self.assertEqual( lowerCAmelCase__ , [ {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]} for i in range(2 ) ] , ) with self.assertRaises(lowerCAmelCase__ ): classifier("" , candidate_labels="politics" ) with self.assertRaises(lowerCAmelCase__ ): classifier(lowerCAmelCase__ , candidate_labels="politics" ) with self.assertRaises(lowerCAmelCase__ ): classifier("Who are you voting for in 2020?" , candidate_labels="" ) with self.assertRaises(lowerCAmelCase__ ): classifier("Who are you voting for in 2020?" , candidate_labels=lowerCAmelCase__ ) with self.assertRaises(lowerCAmelCase__ ): classifier( "Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="Not formatting template" , ) with self.assertRaises(lowerCAmelCase__ ): classifier( "Who are you voting for in 2020?" 
, candidate_labels="politics" , hypothesis_template=lowerCAmelCase__ , ) self.run_entailment_id(lowerCAmelCase__ ) def _A ( self :List[Any] , lowerCAmelCase__ :Pipeline ) -> Union[str, Any]: '''simple docstring''' snake_case_ : int = zero_shot_classifier.model.config snake_case_ : Optional[int] = config.labelaid snake_case_ : Tuple = zero_shot_classifier.entailment_id snake_case_ : Optional[Any] = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2} self.assertEqual(zero_shot_classifier.entailment_id , -1 ) snake_case_ : Tuple = {"entailment": 0, "neutral": 1, "contradiction": 2} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) snake_case_ : str = {"ENTAIL": 0, "NON-ENTAIL": 1} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) snake_case_ : str = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0} self.assertEqual(zero_shot_classifier.entailment_id , 2 ) snake_case_ : List[str] = original_labelaid self.assertEqual(lowerCAmelCase__ , zero_shot_classifier.entailment_id ) @require_torch def _A ( self :Tuple ) -> Any: '''simple docstring''' snake_case_ : List[Any] = pipeline( "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , ) # There was a regression in 4.10 for this # Adding a test so we don't make the mistake again. # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499 zero_shot_classifier( "Who are you voting for in 2020?" * 100 , candidate_labels=["politics", "public health", "science"] ) @require_torch def _A ( self :Optional[Any] ) -> Tuple: '''simple docstring''' snake_case_ : Union[str, Any] = pipeline( "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , ) snake_case_ : int = zero_shot_classifier( "Who are you voting for in 2020?" 
, candidate_labels=["politics", "public health", "science"] ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , { "sequence": "Who are you voting for in 2020?", "labels": ["science", "public health", "politics"], "scores": [0.3_3_3, 0.3_3_3, 0.3_3_3], } , ) @require_tf def _A ( self :Union[str, Any] ) -> Dict: '''simple docstring''' snake_case_ : List[str] = pipeline( "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="tf" , ) snake_case_ : Optional[int] = zero_shot_classifier( "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , { "sequence": "Who are you voting for in 2020?", "labels": ["science", "public health", "politics"], "scores": [0.3_3_3, 0.3_3_3, 0.3_3_3], } , ) @slow @require_torch def _A ( self :Union[str, Any] ) -> int: '''simple docstring''' snake_case_ : int = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="pt" ) snake_case_ : str = zero_shot_classifier( "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , { "sequence": "Who are you voting for in 2020?", "labels": ["politics", "public health", "science"], "scores": [0.9_7_6, 0.0_1_5, 0.0_0_9], } , ) snake_case_ : Optional[int] = zero_shot_classifier( "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks" " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder" " through an attention mechanism. We propose a new simple network architecture, the Transformer, based" " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two" " machine translation tasks show these models to be superior in quality while being more parallelizable" " and requiring significantly less time to train. 
Our model achieves 28.4 BLEU on the WMT 2014" " English-to-German translation task, improving over the existing best results, including ensembles by" " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new" " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small" " fraction of the training costs of the best models from the literature. We show that the Transformer" " generalizes well to other tasks by applying it successfully to English constituency parsing both with" " large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=lowerCAmelCase__ , ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , { "sequence": ( "The dominant sequence transduction models are based on complex recurrent or convolutional neural" " networks in an encoder-decoder configuration. The best performing models also connect the" " encoder and decoder through an attention mechanism. We propose a new simple network" " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence" " and convolutions entirely. Experiments on two machine translation tasks show these models to be" " superior in quality while being more parallelizable and requiring significantly less time to" " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task," " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014" " English-to-French translation task, our model establishes a new single-model state-of-the-art" " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training" " costs of the best models from the literature. We show that the Transformer generalizes well to" " other tasks by applying it successfully to English constituency parsing both with large and" " limited training data." 
), "labels": ["translation", "machine learning", "vision", "statistics"], "scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8], } , ) @slow @require_tf def _A ( self :List[str] ) -> str: '''simple docstring''' snake_case_ : int = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="tf" ) snake_case_ : Optional[Any] = zero_shot_classifier( "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , { "sequence": "Who are you voting for in 2020?", "labels": ["politics", "public health", "science"], "scores": [0.9_7_6, 0.0_1_5, 0.0_0_9], } , ) snake_case_ : Tuple = zero_shot_classifier( "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks" " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder" " through an attention mechanism. We propose a new simple network architecture, the Transformer, based" " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two" " machine translation tasks show these models to be superior in quality while being more parallelizable" " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014" " English-to-German translation task, improving over the existing best results, including ensembles by" " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new" " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small" " fraction of the training costs of the best models from the literature. We show that the Transformer" " generalizes well to other tasks by applying it successfully to English constituency parsing both with" " large and limited training data." 
, candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=lowerCAmelCase__ , ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , { "sequence": ( "The dominant sequence transduction models are based on complex recurrent or convolutional neural" " networks in an encoder-decoder configuration. The best performing models also connect the" " encoder and decoder through an attention mechanism. We propose a new simple network" " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence" " and convolutions entirely. Experiments on two machine translation tasks show these models to be" " superior in quality while being more parallelizable and requiring significantly less time to" " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task," " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014" " English-to-French translation task, our model establishes a new single-model state-of-the-art" " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training" " costs of the best models from the literature. We show that the Transformer generalizes well to" " other tasks by applying it successfully to English constituency parsing both with large and" " limited training data." ), "labels": ["translation", "machine learning", "vision", "statistics"], "scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8], } , )
656
0
'''simple docstring''' import html from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...utils import is_bsa_available, logging, requires_backends if is_bsa_available(): import bsa from bsa import BeautifulSoup lowerCAmelCase_ = logging.get_logger(__name__) class UpperCAmelCase_ ( A__ ): """simple docstring""" def __init__( self , **lowerCamelCase ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ["bs4"] ) super().__init__(**a__ ) def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase ) -> Union[str, Any]: '''simple docstring''' UpperCamelCase : Dict = [] UpperCamelCase : Union[str, Any] = [] UpperCamelCase : Dict = element if element.name else element.parent for parent in child.parents: # type: bs4.element.Tag UpperCamelCase : str = parent.find_all(child.name , recursive=a__ ) xpath_tags.append(child.name ) xpath_subscripts.append( 0 if 1 == len(a__ ) else next(i for i, s in enumerate(a__ , 1 ) if s is child ) ) UpperCamelCase : int = parent xpath_tags.reverse() xpath_subscripts.reverse() return xpath_tags, xpath_subscripts def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase ) -> Union[str, Any]: '''simple docstring''' UpperCamelCase : int = BeautifulSoup(a__ , "html.parser" ) UpperCamelCase : Union[str, Any] = [] UpperCamelCase : Union[str, Any] = [] UpperCamelCase : Tuple = [] for element in html_code.descendants: if type(a__ ) == bsa.element.NavigableString: if type(element.parent ) != bsa.element.Tag: continue UpperCamelCase : List[str] = html.unescape(a__ ).strip() if not text_in_this_tag: continue all_doc_strings.append(a__ ) UpperCamelCase , UpperCamelCase : Optional[int] = self.xpath_soup(a__ ) stringaxtag_seq.append(a__ ) stringaxsubs_seq.append(a__ ) if len(a__ ) != len(a__ ): raise ValueError("Number of doc strings and xtags does not correspond" ) if len(a__ ) != len(a__ ): raise ValueError("Number of doc strings and xsubs does not correspond" ) return all_doc_strings, stringaxtag_seq, stringaxsubs_seq def 
SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase ) -> str: '''simple docstring''' UpperCamelCase : Optional[Any] = "" for tagname, subs in zip(a__ , a__ ): xpath += f'''/{tagname}''' if subs != 0: xpath += f'''[{subs}]''' return xpath def __call__( self , lowerCamelCase ) -> Any: '''simple docstring''' UpperCamelCase : Any = False # Check that strings has a valid type if isinstance(a__ , a__ ): UpperCamelCase : str = True elif isinstance(a__ , (list, tuple) ): if len(a__ ) == 0 or isinstance(html_strings[0] , a__ ): UpperCamelCase : Tuple = True if not valid_strings: raise ValueError( "HTML strings must of type `str`, `List[str]` (batch of examples), " f'''but is of type {type(a__ )}.''' ) UpperCamelCase : Union[str, Any] = bool(isinstance(a__ , (list, tuple) ) and (isinstance(html_strings[0] , a__ )) ) if not is_batched: UpperCamelCase : Optional[Any] = [html_strings] # Get nodes + xpaths UpperCamelCase : Any = [] UpperCamelCase : List[Any] = [] for html_string in html_strings: UpperCamelCase , UpperCamelCase , UpperCamelCase : int = self.get_three_from_single(a__ ) nodes.append(a__ ) UpperCamelCase : Union[str, Any] = [] for node, tag_list, sub_list in zip(a__ , a__ , a__ ): UpperCamelCase : List[Any] = self.construct_xpath(a__ , a__ ) xpath_strings.append(a__ ) xpaths.append(a__ ) # return as Dict UpperCamelCase : Union[str, Any] = {"nodes": nodes, "xpaths": xpaths} UpperCamelCase : Union[str, Any] = BatchFeature(data=a__ , tensor_type=a__ ) return encoded_inputs
173
"""MAUVE metric wrapper for the `datasets` library (wraps the mauve-text package)."""
import faiss  # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy  # noqa: F401 # Here to have a nice missing dependency error message early on
import requests  # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn  # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm  # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve  # From: mauve-text

import datasets


_CITATION = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"

_DESCRIPTION = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"

_KWARGS_DESCRIPTION = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _UpperCAmelCase(datasets.Metric):
    """`datasets.Metric` subclass delegating the actual computation to mauve-text."""

    def _info(self):
        # Metadata shown by `datasets` (features schema, citation, links).
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/krishnap25/mauve",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/krishnap25/mauve"],
            reference_urls=[
                "https://arxiv.org/abs/2102.01454",
                "https://github.com/krishnap25/mauve",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        """Compute MAUVE between `predictions` (P) and `references` (Q).

        All keyword arguments are forwarded unchanged to mauve-text's
        `compute_mauve`; see _KWARGS_DESCRIPTION above for their meaning.
        """
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
632
0
"""Close or nag stale GitHub issues on huggingface/transformers."""
from datetime import datetime as dt
import os

from github import Github


# Issues carrying any of these labels are never auto-closed or auto-commented.
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]


def main():
    """Scan open issues; close those already nagged by the bot, nag the rest once stale."""
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # Most recent comment first.
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
107
from . import ( albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_a, bloom, bridgetower, byta, camembert, canine, chinese_clip, clap, clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextva, cpm, cpmant, ctrl, cvt, dataavec, deberta, deberta_va, decision_transformer, deformable_detr, deit, deprecated, deta, detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpta, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_swa, gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmva, layoutlmva, layoutxlm, led, levit, lilt, llama, longformer, longta, luke, lxmert, mam_aaa, marian, markuplm, maskaformer, maskformer, mbart, mbartaa, mega, megatron_bert, megatron_gpta, mgp_str, mluke, mobilebert, mobilenet_va, mobilenet_va, mobilevit, mobilevitva, mpnet, mra, mta, musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pixastruct, plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder, speech_to_text, speech_to_text_a, speechta, splinter, squeezebert, swiftformer, swin, swinasr, swinva, switch_transformers, ta, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, transfo_xl, trocr, tvlt, umta, unispeech, unispeech_sat, upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vivit, wavaveca, wavaveca_conformer, wavaveca_phoneme, 
wavaveca_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, )
107
1
"""Lazy-loading __init__ for the Reformer model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


# Base import structure; optional submodules are appended below only when
# their backend (sentencepiece / tokenizers / torch) is installed.
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_reformer"] = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static type checkers get the real (eager) imports, mirroring the
    # structure registered above.
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports
    # submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
94
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() __snake_case : List[str] = logging.get_logger(__name__) def a_ ( __a ): A__ = DPTConfig() if "large" in checkpoint_url: A__ = 1024 A__ = 4096 A__ = 24 A__ = 16 A__ = [5, 11, 17, 23] A__ = [256, 512, 1024, 1024] A__ = (1, 384, 384) if "ade" in checkpoint_url: A__ = True A__ = 150 A__ = '''huggingface/label-files''' A__ = '''ade20k-id2label.json''' A__ = json.load(open(cached_download(hf_hub_url(__a , __a , repo_type='''dataset''' ) ) , '''r''' ) ) A__ = {int(__a ): v for k, v in idalabel.items()} A__ = idalabel A__ = {v: k for k, v in idalabel.items()} A__ = [1, 150, 480, 480] return config, expected_shape def a_ ( __a ): A__ = ['''pretrained.model.head.weight''', '''pretrained.model.head.bias'''] for k in ignore_keys: state_dict.pop(__a , __a ) def a_ ( __a ): if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): A__ = name.replace('''pretrained.model''' , '''dpt.encoder''' ) if "pretrained.model" in name: A__ = name.replace('''pretrained.model''' , '''dpt.embeddings''' ) if "patch_embed" in name: A__ = name.replace('''patch_embed''' , '''patch_embeddings''' ) if "pos_embed" in name: A__ = name.replace('''pos_embed''' , '''position_embeddings''' ) if "attn.proj" in name: A__ = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "proj" in name and "project" not in name: A__ = name.replace('''proj''' , '''projection''' ) if "blocks" in name: A__ = name.replace('''blocks''' , '''layer''' ) if "mlp.fc1" in name: A__ = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: A__ = name.replace('''mlp.fc2''' , 
'''output.dense''' ) if "norm1" in name: A__ = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: A__ = name.replace('''norm2''' , '''layernorm_after''' ) if "scratch.output_conv" in name: A__ = name.replace('''scratch.output_conv''' , '''head''' ) if "scratch" in name: A__ = name.replace('''scratch''' , '''neck''' ) if "layer1_rn" in name: A__ = name.replace('''layer1_rn''' , '''convs.0''' ) if "layer2_rn" in name: A__ = name.replace('''layer2_rn''' , '''convs.1''' ) if "layer3_rn" in name: A__ = name.replace('''layer3_rn''' , '''convs.2''' ) if "layer4_rn" in name: A__ = name.replace('''layer4_rn''' , '''convs.3''' ) if "refinenet" in name: A__ = int(name[len('''neck.refinenet''' ) : len('''neck.refinenet''' ) + 1] ) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 A__ = name.replace(f'''refinenet{layer_idx}''' , f'''fusion_stage.layers.{abs(layer_idx-4 )}''' ) if "out_conv" in name: A__ = name.replace('''out_conv''' , '''projection''' ) if "resConfUnit1" in name: A__ = name.replace('''resConfUnit1''' , '''residual_layer1''' ) if "resConfUnit2" in name: A__ = name.replace('''resConfUnit2''' , '''residual_layer2''' ) if "conv1" in name: A__ = name.replace('''conv1''' , '''convolution1''' ) if "conv2" in name: A__ = name.replace('''conv2''' , '''convolution2''' ) # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: A__ = name.replace('''pretrained.act_postprocess1.0.project.0''' , '''neck.reassemble_stage.readout_projects.0.0''' ) if "pretrained.act_postprocess2.0.project.0" in name: A__ = name.replace('''pretrained.act_postprocess2.0.project.0''' , '''neck.reassemble_stage.readout_projects.1.0''' ) if "pretrained.act_postprocess3.0.project.0" in name: A__ = name.replace('''pretrained.act_postprocess3.0.project.0''' , '''neck.reassemble_stage.readout_projects.2.0''' ) if "pretrained.act_postprocess4.0.project.0" in name: A__ = name.replace('''pretrained.act_postprocess4.0.project.0''' , 
'''neck.reassemble_stage.readout_projects.3.0''' ) # resize blocks if "pretrained.act_postprocess1.3" in name: A__ = name.replace('''pretrained.act_postprocess1.3''' , '''neck.reassemble_stage.layers.0.projection''' ) if "pretrained.act_postprocess1.4" in name: A__ = name.replace('''pretrained.act_postprocess1.4''' , '''neck.reassemble_stage.layers.0.resize''' ) if "pretrained.act_postprocess2.3" in name: A__ = name.replace('''pretrained.act_postprocess2.3''' , '''neck.reassemble_stage.layers.1.projection''' ) if "pretrained.act_postprocess2.4" in name: A__ = name.replace('''pretrained.act_postprocess2.4''' , '''neck.reassemble_stage.layers.1.resize''' ) if "pretrained.act_postprocess3.3" in name: A__ = name.replace('''pretrained.act_postprocess3.3''' , '''neck.reassemble_stage.layers.2.projection''' ) if "pretrained.act_postprocess4.3" in name: A__ = name.replace('''pretrained.act_postprocess4.3''' , '''neck.reassemble_stage.layers.3.projection''' ) if "pretrained.act_postprocess4.4" in name: A__ = name.replace('''pretrained.act_postprocess4.4''' , '''neck.reassemble_stage.layers.3.resize''' ) if "pretrained" in name: A__ = name.replace('''pretrained''' , '''dpt''' ) if "bn" in name: A__ = name.replace('''bn''' , '''batch_norm''' ) if "head" in name: A__ = name.replace('''head''' , '''head.head''' ) if "encoder.norm" in name: A__ = name.replace('''encoder.norm''' , '''layernorm''' ) if "auxlayer" in name: A__ = name.replace('''auxlayer''' , '''auxiliary_head.head''' ) return name def a_ ( __a , __a ): for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) A__ = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.weight''' ) A__ = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict A__ = in_proj_weight[: config.hidden_size, :] A__ = in_proj_bias[: config.hidden_size] A__ = in_proj_weight[ config.hidden_size : 
config.hidden_size * 2, : ] A__ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] A__ = in_proj_weight[ -config.hidden_size :, : ] A__ = in_proj_bias[-config.hidden_size :] def a_ ( ): A__ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' A__ = Image.open(requests.get(__a , stream=__a ).raw ) return im @torch.no_grad() def a_ ( __a , __a , __a , __a ): A__ , A__ = get_dpt_config(__a ) # load original state_dict from URL A__ = torch.hub.load_state_dict_from_url(__a , map_location='''cpu''' ) # remove certain keys remove_ignore_keys_(__a ) # rename keys for key in state_dict.copy().keys(): A__ = state_dict.pop(__a ) A__ = val # read in qkv matrices read_in_q_k_v(__a , __a ) # load HuggingFace model A__ = DPTForSemanticSegmentation(__a ) if '''ade''' in checkpoint_url else DPTForDepthEstimation(__a ) model.load_state_dict(__a ) model.eval() # Check outputs on an image A__ = 480 if '''ade''' in checkpoint_url else 384 A__ = DPTImageProcessor(size=__a ) A__ = prepare_img() A__ = image_processor(__a , return_tensors='''pt''' ) # forward pass A__ = model(**__a ).logits if '''ade''' in checkpoint_url else model(**__a ).predicted_depth # Assert logits A__ = torch.tensor([[6.31_99, 6.36_29, 6.41_48], [6.38_50, 6.36_15, 6.41_66], [6.35_19, 6.31_76, 6.35_75]] ) if "ade" in checkpoint_url: A__ = torch.tensor([[4.04_80, 4.24_20, 4.43_60], [4.31_24, 4.56_93, 4.82_61], [4.57_68, 4.89_65, 5.21_63]] ) assert outputs.shape == torch.Size(__a ) assert ( torch.allclose(outputs[0, 0, :3, :3] , __a , atol=1e-4 ) if "ade" in checkpoint_url else torch.allclose(outputs[0, :3, :3] , __a ) ) Path(__a ).mkdir(exist_ok=__a ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(__a ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__a ) if push_to_hub: print('''Pushing model to hub...''' ) model.push_to_hub( repo_path_or_name=Path(__a , __a ) , organization='''nielsr''' , commit_message='''Add 
model''' , use_temp_dir=__a , ) image_processor.push_to_hub( repo_path_or_name=Path(__a , __a ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=__a , ) if __name__ == "__main__": __snake_case : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt', type=str, help='URL of the original DPT checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', action='store_true', ) parser.add_argument( '--model_name', default='dpt-large', type=str, help='Name of the model, in case you\'re pushing to the hub.', ) __snake_case : Dict = parser.parse_args() convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
571
0
def __magic_name__ ( lowercase_ ) -> bool: '''simple docstring''' return number & 1 == 0 if __name__ == "__main__": import doctest doctest.testmod()
718
"""Preprocess the Natural Questions dataset into strided BigBird training samples."""
import os

import jsonlines
import numpy as np
from tqdm import tqdm


DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
# Label id per answer category.
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}


def _get_single_answer(example):
    """Pick one annotation for `example` and normalize it into a flat answer dict.

    Raises:
        ValueError: when a required answer column is not a list.
    """

    def choose_first(answer, is_long_answer=False):
        # Prefer the first annotation that actually has tokens.
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])

    return answer


def get_context_and_ans(example, assertion=False):
    """Strip HTML tokens from the document and re-map answer token indices accordingly."""
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            # Every HTML token before the answer shifts its index left by one.
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")

    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }


def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    """Tokenize question+context and split long inputs into overlapping strided windows."""
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break
        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(category),
                "end_token": [-100] * len(category),
                "category": category,
            },
        }

    # Re-express the word-level answer span in sub-token indices.
    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]),
            add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )

    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            # NOTE(review): start_token/end_token are rebound here, so windows after
            # the one containing the answer compare shifted values — confirm intended.
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }


def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    """`datasets.map`-compatible wrapper around `get_strided_contexts_and_ans`."""
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )
    return example


def save_to_disk(hf_data, file_name):
    """Append processed samples to a JSON-lines file, downsampling null answers."""
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # removing 50 % samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )


if __name__ == "__main__":
    from datasets import load_dataset

    from transformers import BigBirdTokenizer

    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    data = data["train" if PROCESS_TRAIN == "true" else "validation"]

    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)

    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
    save_to_disk(data, file_name=cache_file_name)
414
0
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING A__: Any = logging.get_logger(__name__) A__: Any = { '''salesforce/blip2-opt-2.7b''': '''https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json''', } class _a ( UpperCamelCase__): """simple docstring""" UpperCamelCase__ = """blip_2_vision_model""" def __init__( self: Tuple , __lowerCamelCase: List[Any]=1408 , __lowerCamelCase: Optional[Any]=6144 , __lowerCamelCase: List[Any]=39 , __lowerCamelCase: Dict=16 , __lowerCamelCase: Optional[int]=224 , __lowerCamelCase: int=14 , __lowerCamelCase: str="gelu" , __lowerCamelCase: Any=0.00_001 , __lowerCamelCase: int=0.0 , __lowerCamelCase: Dict=1e-10 , __lowerCamelCase: Any=True , **__lowerCamelCase: Dict , ): '''simple docstring''' super().__init__(**__lowerCamelCase ) UpperCamelCase__: Tuple = hidden_size UpperCamelCase__: List[Any] = intermediate_size UpperCamelCase__: Union[str, Any] = num_hidden_layers UpperCamelCase__: Optional[Any] = num_attention_heads UpperCamelCase__: List[Any] = patch_size UpperCamelCase__: Tuple = image_size UpperCamelCase__: List[str] = initializer_range UpperCamelCase__: List[Any] = attention_dropout UpperCamelCase__: Optional[int] = layer_norm_eps UpperCamelCase__: Any = hidden_act UpperCamelCase__: Optional[Any] = qkv_bias @classmethod def UpperCAmelCase_ ( cls: Tuple , __lowerCamelCase: Union[str, os.PathLike] , **__lowerCamelCase: Optional[int] ): '''simple docstring''' cls._set_token_in_kwargs(__lowerCamelCase ) UpperCamelCase__ , UpperCamelCase__: Optional[Any] = cls.get_config_dict(__lowerCamelCase , **__lowerCamelCase ) # get the vision config dict if we are loading from Blip2Config if config_dict.get("model_type" ) == "blip-2": UpperCamelCase__: Union[str, Any] = config_dict["vision_config"] if "model_type" in config_dict and 
hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F"You are using a model of type {config_dict['model_type']} to instantiate a model of type " F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(__lowerCamelCase , **__lowerCamelCase ) class _a ( UpperCamelCase__): """simple docstring""" UpperCamelCase__ = """blip_2_qformer""" def __init__( self: List[Any] , __lowerCamelCase: str=3_0522 , __lowerCamelCase: List[str]=768 , __lowerCamelCase: Any=12 , __lowerCamelCase: Optional[Any]=12 , __lowerCamelCase: List[Any]=3072 , __lowerCamelCase: Union[str, Any]="gelu" , __lowerCamelCase: Tuple=0.1 , __lowerCamelCase: Union[str, Any]=0.1 , __lowerCamelCase: int=512 , __lowerCamelCase: int=0.02 , __lowerCamelCase: Optional[int]=1e-12 , __lowerCamelCase: Tuple=0 , __lowerCamelCase: List[Any]="absolute" , __lowerCamelCase: str=2 , __lowerCamelCase: Optional[Any]=1408 , **__lowerCamelCase: Optional[int] , ): '''simple docstring''' super().__init__(pad_token_id=__lowerCamelCase , **__lowerCamelCase ) UpperCamelCase__: Dict = vocab_size UpperCamelCase__: Tuple = hidden_size UpperCamelCase__: Dict = num_hidden_layers UpperCamelCase__: Tuple = num_attention_heads UpperCamelCase__: Any = hidden_act UpperCamelCase__: Any = intermediate_size UpperCamelCase__: List[str] = hidden_dropout_prob UpperCamelCase__: List[Any] = attention_probs_dropout_prob UpperCamelCase__: Tuple = max_position_embeddings UpperCamelCase__: Optional[Any] = initializer_range UpperCamelCase__: Dict = layer_norm_eps UpperCamelCase__: Optional[int] = position_embedding_type UpperCamelCase__: Optional[Any] = cross_attention_frequency UpperCamelCase__: Optional[int] = encoder_hidden_size @classmethod def UpperCAmelCase_ ( cls: int , __lowerCamelCase: Union[str, os.PathLike] , **__lowerCamelCase: Optional[int] ): '''simple docstring''' cls._set_token_in_kwargs(__lowerCamelCase ) UpperCamelCase__ , 
UpperCamelCase__: Tuple = cls.get_config_dict(__lowerCamelCase , **__lowerCamelCase ) # get the qformer config dict if we are loading from Blip2Config if config_dict.get("model_type" ) == "blip-2": UpperCamelCase__: str = config_dict["qformer_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F"You are using a model of type {config_dict['model_type']} to instantiate a model of type " F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(__lowerCamelCase , **__lowerCamelCase ) class _a ( UpperCamelCase__): """simple docstring""" UpperCamelCase__ = """blip-2""" UpperCamelCase__ = True def __init__( self: Union[str, Any] , __lowerCamelCase: Optional[Any]=None , __lowerCamelCase: Optional[Any]=None , __lowerCamelCase: Any=None , __lowerCamelCase: int=32 , **__lowerCamelCase: str ): '''simple docstring''' super().__init__(**__lowerCamelCase ) if vision_config is None: UpperCamelCase__: Dict = {} logger.info("vision_config is None. initializing the Blip2VisionConfig with default values." ) if qformer_config is None: UpperCamelCase__: int = {} logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values." ) if text_config is None: UpperCamelCase__: Any = {} logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." 
) UpperCamelCase__: Tuple = BlipaVisionConfig(**__lowerCamelCase ) UpperCamelCase__: int = BlipaQFormerConfig(**__lowerCamelCase ) UpperCamelCase__: List[Any] = text_config["model_type"] if "model_type" in text_config else "opt" UpperCamelCase__: Optional[Any] = CONFIG_MAPPING[text_model_type](**__lowerCamelCase ) UpperCamelCase__: List[Any] = self.text_config.tie_word_embeddings UpperCamelCase__: Any = self.text_config.is_encoder_decoder UpperCamelCase__: List[Any] = num_query_tokens UpperCamelCase__: Any = self.vision_config.hidden_size UpperCamelCase__: Any = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES UpperCamelCase__: Optional[int] = 1.0 UpperCamelCase__: Tuple = 0.02 @classmethod def UpperCAmelCase_ ( cls: int , __lowerCamelCase: BlipaVisionConfig , __lowerCamelCase: BlipaQFormerConfig , __lowerCamelCase: PretrainedConfig , **__lowerCamelCase: str , ): '''simple docstring''' return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__lowerCamelCase , ) def UpperCAmelCase_ ( self: Dict ): '''simple docstring''' UpperCamelCase__: Any = copy.deepcopy(self.__dict__ ) UpperCamelCase__: Any = self.vision_config.to_dict() UpperCamelCase__: Optional[Any] = self.qformer_config.to_dict() UpperCamelCase__: Optional[int] = self.text_config.to_dict() UpperCamelCase__: Any = self.__class__.model_type return output
380
def binary_insertion_sort(collection):
    """Sort *collection* in place using binary insertion sort and return it.

    For each element, a binary search locates its insertion point in the
    already-sorted prefix, then the intervening elements are shifted one
    slot to the right. The sort is stable.

    :param collection: mutable sequence of mutually comparable items.
    :return: the same sequence, sorted in ascending order.

    >>> binary_insertion_sort([5, 2, 4, 1])
    [1, 2, 4, 5]
    """
    length = len(collection)
    for i in range(1, length):
        val = collection[i]
        low = 0
        high = i - 1
        # Binary search for the leftmost slot in collection[:i] where
        # ``val`` can be inserted while keeping the prefix sorted.
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # Shift the tail of the sorted prefix right, then insert.
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


# Backward-compatible alias for the original (obfuscated) public name.
lowerCAmelCase_ = binary_insertion_sort


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
380
1
'''simple docstring''' import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __SCREAMING_SNAKE_CASE (__lowerCamelCase ): """simple docstring""" __a =['image_processor', 'tokenizer'] __a ='LayoutLMv3ImageProcessor' __a =('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast') def __init__( self : Any , __a : Any=None , __a : Tuple=None , **__a : Any ): _a = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , UpperCAmelCase_ , ) _a = kwargs.pop("feature_extractor" ) _a = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." 
) super().__init__(UpperCAmelCase_ , UpperCAmelCase_ ) def __call__( self : Dict , __a : Optional[int] , __a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __a : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , __a : Union[List[List[int]], List[List[List[int]]]] = None , __a : Optional[Union[List[int], List[List[int]]]] = None , __a : bool = True , __a : Union[bool, str, PaddingStrategy] = False , __a : Union[bool, str, TruncationStrategy] = None , __a : Optional[int] = None , __a : int = 0 , __a : Optional[int] = None , __a : Optional[bool] = None , __a : Optional[bool] = None , __a : bool = False , __a : bool = False , __a : bool = False , __a : bool = False , __a : bool = True , __a : Optional[Union[str, TensorType]] = None , **__a : List[str] , ): # verify input if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True." ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( "You cannot provide word labels if you initialized the image processor with apply_ocr set to True." 
) # first, apply the image processor _a = self.image_processor(images=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): _a = [text] # add batch dimension (as the image processor always adds a batch dimension) _a = features['words'] _a = self.tokenizer( text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ , stride=UpperCAmelCase_ , pad_to_multiple_of=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_overflowing_tokens=UpperCAmelCase_ , return_special_tokens_mask=UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , return_length=UpperCAmelCase_ , verbose=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ , ) # add pixel values _a = features.pop("pixel_values" ) if return_overflowing_tokens is True: _a = self.get_overflowing_images(UpperCAmelCase_ , encoded_inputs["overflow_to_sample_mapping"] ) _a = images return encoded_inputs def UpperCamelCase__ ( self : Dict , __a : Union[str, Any] , __a : List[str] ): # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image _a = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(UpperCAmelCase_ ) != len(UpperCAmelCase_ ): raise ValueError( "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got" f' {len(UpperCAmelCase_ )} and {len(UpperCAmelCase_ )}' ) return images_with_overflow def UpperCamelCase__ ( self : Tuple , *__a : str , **__a : int ): return self.tokenizer.batch_decode(*UpperCAmelCase_ , 
**UpperCAmelCase_ ) def UpperCamelCase__ ( self : Optional[Any] , *__a : Any , **__a : Optional[Any] ): return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ ) @property def UpperCamelCase__ ( self : Dict ): return ["input_ids", "bbox", "attention_mask", "pixel_values"] @property def UpperCamelCase__ ( self : List[Any] ): warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , UpperCAmelCase_ , ) return self.image_processor_class @property def UpperCamelCase__ ( self : Any ): warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , UpperCAmelCase_ , ) return self.image_processor
706
"""Minimal Stable Diffusion inference script.

Loads a locally fine-tuned (e.g. DreamBooth) checkpoint, renders one
prompt, and writes the resulting image to ``dog-bucket.png``.
Requires a CUDA-capable GPU.
"""
import torch

from diffusers import StableDiffusionPipeline

# Path to the fine-tuned model directory (replace before running).
model_id = "path-to-your-trained-model"
# Load weights in half precision and move the whole pipeline to the GPU.
# NOTE: the original obfuscated file assigned every value to the same name
# and referenced undefined ones; ``torch.floataa`` was the mangled
# ``torch.float16``.
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
# 50 denoising steps with classifier-free guidance scale 7.5 (the
# standard quality/speed trade-off for SD v1.x checkpoints).
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
521
0
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class UpperCAmelCase_ (unittest.TestCase ): """simple docstring""" def a ( self : Any )-> int: """simple docstring""" UpperCAmelCase_ : Optional[Any] = [[1, 2, 4], [1, 2, 3, 4]] UpperCAmelCase_ : Union[str, Any] = DisjunctiveConstraint(a_ ) self.assertTrue(isinstance(dc.token_ids , a_ ) ) with self.assertRaises(a_ ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(a_ ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def a ( self : List[str] )-> List[Any]: """simple docstring""" UpperCAmelCase_ : Tuple = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(a_ ): DisjunctiveConstraint(a_ ) # fails here def a ( self : Dict )-> List[str]: """simple docstring""" UpperCAmelCase_ : Optional[int] = [[1, 2, 3], [1, 2, 4]] UpperCAmelCase_ : Dict = DisjunctiveConstraint(a_ ) UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ : int = dc.update(1 ) UpperCAmelCase_ : str = stepped is True and completed is False and reset is False self.assertTrue(a_ ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ : Optional[int] = dc.update(2 ) UpperCAmelCase_ : Tuple = stepped is True and completed is False and reset is False self.assertTrue(a_ ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ : Union[str, Any] = dc.update(3 ) UpperCAmelCase_ : str = stepped is True and completed is True and reset is False self.assertTrue(a_ ) self.assertTrue(dc.completed ) # Completed! 
self.assertTrue(dc.current_seq == [1, 2, 3] ) def a ( self : List[Any] )-> List[Any]: """simple docstring""" UpperCAmelCase_ : Optional[int] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] UpperCAmelCase_ : Tuple = DisjunctiveConstraint(a_ ) UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ : List[Any] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ : Tuple = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ : List[str] = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ : str = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ : Any = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ : Any = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ : List[str] = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
470
"""simple docstring""" def A_ ( lowercase ) -> int: """simple docstring""" return 1 if digit in (0, 1) else (digit * factorial(digit - 1 )) def A_ ( lowercase ) -> bool: """simple docstring""" UpperCAmelCase_ : str = 0 UpperCAmelCase_ : Union[str, Any] = number while duplicate > 0: UpperCAmelCase_ ,UpperCAmelCase_ : Tuple = divmod(lowercase , 10 ) fact_sum += factorial(lowercase ) return fact_sum == number if __name__ == "__main__": print("Program to check whether a number is a Krisnamurthy Number or not.") lowercase_ = int(input("Enter number: ").strip()) print( f"""{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number.""" )
470
1
'''simple docstring''' import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin lowercase_ = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right lowercase_ = 250_004 lowercase_ = 250_020 @require_sentencepiece @require_tokenizers class __A ( A , unittest.TestCase ): '''simple docstring''' __lowerCamelCase : List[str] = MBartTokenizer __lowerCamelCase : List[Any] = MBartTokenizerFast __lowerCamelCase : List[Any] = True __lowerCamelCase : List[str] = True def a__ (self ) -> Dict: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing _a = MBartTokenizer(A , keep_accents=A ) tokenizer.save_pretrained(self.tmpdirname ) def a__ (self ) -> Tuple: """simple docstring""" _a = MBartTokenizer(A , keep_accents=A ) _a = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) _a = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( A , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) _a = tokenizer.convert_tokens_to_ids(A ) self.assertListEqual( A , [ value + tokenizer.fairseq_offset for value in [8, 21, 
84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) _a = tokenizer.convert_ids_to_tokens(A ) self.assertListEqual( A , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) def a__ (self ) -> Tuple: """simple docstring""" if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return _a = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _a = self.rust_tokenizer_class.from_pretrained(A , **A ) _a = self.tokenizer_class.from_pretrained(A , **A ) _a = tempfile.mkdtemp() _a = tokenizer_r.save_pretrained(A ) _a = tokenizer_p.save_pretrained(A ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) _a = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(A , A ) # Checks everything loads correctly in the same way _a = tokenizer_r.from_pretrained(A ) _a = tokenizer_p.from_pretrained(A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(A , A ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(A ) # Save tokenizer rust, legacy_format=True _a = tempfile.mkdtemp() _a = 
tokenizer_r.save_pretrained(A , legacy_format=A ) _a = tokenizer_p.save_pretrained(A ) # Checks it save with the same files self.assertSequenceEqual(A , A ) # Checks everything loads correctly in the same way _a = tokenizer_r.from_pretrained(A ) _a = tokenizer_p.from_pretrained(A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(A , A ) ) shutil.rmtree(A ) # Save tokenizer rust, legacy_format=False _a = tempfile.mkdtemp() _a = tokenizer_r.save_pretrained(A , legacy_format=A ) _a = tokenizer_p.save_pretrained(A ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way _a = tokenizer_r.from_pretrained(A ) _a = tokenizer_p.from_pretrained(A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(A , A ) ) shutil.rmtree(A ) @require_torch @require_sentencepiece @require_tokenizers class __A ( unittest.TestCase ): '''simple docstring''' __lowerCamelCase : int = 'facebook/mbart-large-en-ro' __lowerCamelCase : Optional[int] = [ ' UN Chief Says There Is No Military Solution in Syria', ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.', ] __lowerCamelCase : List[Any] = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', 'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei' ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor' ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.', ] __lowerCamelCase : str = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 
67_485, 53, 187_895, 23, 51_712, 2, EN_CODE] @classmethod def a__ (cls ) -> Any: """simple docstring""" _a = MBartTokenizer.from_pretrained( cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' ) _a = 1 return cls def a__ (self ) -> Dict: """simple docstring""" self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 250_001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 250_004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 250_020 ) def a__ (self ) -> List[str]: """simple docstring""" _a = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , A ) def a__ (self ) -> Tuple: """simple docstring""" self.assertIn(A , self.tokenizer.all_special_ids ) _a = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2] _a = self.tokenizer.decode(A , skip_special_tokens=A ) _a = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=A ) self.assertEqual(A , A ) self.assertNotIn(self.tokenizer.eos_token , A ) def a__ (self ) -> Optional[Any]: """simple docstring""" _a = ['''this is gunna be a long sentence ''' * 20] assert isinstance(src_text[0] , A ) _a = 10 _a = self.tokenizer(A , max_length=A , truncation=A ).input_ids[0] self.assertEqual(ids[-2] , 2 ) self.assertEqual(ids[-1] , A ) self.assertEqual(len(A ) , A ) def a__ (self ) -> List[str]: """simple docstring""" self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [250_026, 250_001] ) def a__ (self ) -> Optional[int]: """simple docstring""" _a = tempfile.mkdtemp() _a = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(A ) _a = MBartTokenizer.from_pretrained(A ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , A ) @require_torch def a__ (self ) -> Tuple: """simple docstring""" _a = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=A , return_tensors='''pt''' ) _a = shift_tokens_right(batch['''labels'''] , 
self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE] assert batch.decoder_input_ids[1][0].tolist() == RO_CODE assert batch.decoder_input_ids[1][-1] == 2 assert batch.labels[1][-2:].tolist() == [2, RO_CODE] @require_torch def a__ (self ) -> Optional[int]: """simple docstring""" _a = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=A , truncation=A , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , ) _a = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id ) self.assertIsInstance(A , A ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) _a = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , A ) self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] ) def a__ (self ) -> Union[str, Any]: """simple docstring""" _a = self.tokenizer(self.src_text , padding=A , truncation=A , max_length=3 , return_tensors='''pt''' ) _a = self.tokenizer( text_target=self.tgt_text , padding=A , truncation=A , max_length=10 , return_tensors='''pt''' ) _a = targets['''input_ids'''] _a = shift_tokens_right(A , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def a__ (self ) -> List[Any]: """simple docstring""" _a = self.tokenizer._build_translation_inputs( '''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' ) self.assertEqual( nested_simplify(A ) , { # A, test, EOS, en_XX '''input_ids''': [[62, 3_034, 2, 250_004]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 250_001, } , )
716
'''Unit tests for ``transformers.generation.DisjunctiveConstraint``.

NOTE(review): this file went through an identifier-mangling pass that broke
it — every local is assigned to ``_a`` while the code reads
``dc``/``stepped``/``completed``/``reset``, call arguments became the
undefined name ``A``, and all four test methods share the name ``a__`` (so
later defs shadow earlier ones and none match unittest's ``test_*``
discovery). The code is preserved byte-for-byte; only comments were added.
'''
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class __A ( unittest.TestCase ):
    '''Exercises construction, update stepping, and reset of DisjunctiveConstraint.'''

    def a__ (self ) -> Optional[int]:
        # Valid nested token-id lists construct fine; tensors (or lists of
        # tensors) must be rejected with an exception.
        """simple docstring"""
        _a = [[1, 2, 4], [1, 2, 3, 4]]
        _a = DisjunctiveConstraint(A )
        self.assertTrue(isinstance(dc.token_ids , A ) )
        with self.assertRaises(A ):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
        with self.assertRaises(A ):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )

    def a__ (self ) -> Any:
        # One branch being a strict prefix of another is ambiguous and must
        # be rejected at construction time.
        """simple docstring"""
        _a = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(A ):
            DisjunctiveConstraint(A )  # fails here

    def a__ (self ) -> Dict:
        # Step token-by-token down one branch; completion fires on the
        # final token of that branch.
        """simple docstring"""
        _a = [[1, 2, 3], [1, 2, 4]]
        _a = DisjunctiveConstraint(A )
        _a , _a , _a = dc.update(1 )
        _a = stepped is True and completed is False and reset is False
        self.assertTrue(A )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        _a , _a , _a = dc.update(2 )
        _a = stepped is True and completed is False and reset is False
        self.assertTrue(A )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        _a , _a , _a = dc.update(3 )
        _a = stepped is True and completed is True and reset is False
        self.assertTrue(A )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3] )

    def a__ (self ) -> List[Any]:
        # Completion via a longer branch, then reset() and completion via a
        # different branch; remaining() counts tokens left on the branch.
        """simple docstring"""
        _a = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        _a = DisjunctiveConstraint(A )
        _a , _a , _a = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        _a , _a , _a = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        _a , _a , _a = dc.update(4 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2, 4] )
        _a , _a , _a = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
        dc.reset()
        _a , _a , _a = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 3 )
        self.assertTrue(dc.current_seq == [1] )
        _a , _a , _a = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 2 )
        self.assertTrue(dc.current_seq == [1, 2] )
        _a , _a , _a = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
352
0
'''simple docstring'''


def lowercase_(string: str, separator: str = " ") -> list:
    """Split *string* on single-character occurrences of *separator*.

    Unlike ``str.split``, a trailing separator does not produce a trailing
    empty field, and an empty input yields an empty list.

    :param string: text to split.
    :param separator: one-character delimiter (default: a single space).
    :return: list of the fields found between separators.

    >>> lowercase_("a b c")
    ['a', 'b', 'c']
    >>> lowercase_("a,b,c", ",")
    ['a', 'b', 'c']
    """
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            # Final character reached without hitting a separator:
            # flush the remaining tail as the last field.
            split_words.append(string[last_index : index + 1])
    return split_words


if __name__ == "__main__":
    from doctest import testmod

    testmod()
94
'''simple docstring''' import unittest import numpy as np def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : np.ndarray | None = None , ): '''simple docstring''' UpperCAmelCase__ = np.shape(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = np.shape(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = np.shape(SCREAMING_SNAKE_CASE__ ) if shape_a[0] != shape_b[0]: UpperCAmelCase__ = ( """Expected the same number of rows for A and B. """ F'''Instead found A of size {shape_a} and B of size {shape_b}''' ) raise ValueError(SCREAMING_SNAKE_CASE__ ) if shape_b[1] != shape_c[1]: UpperCAmelCase__ = ( """Expected the same number of columns for B and C. """ F'''Instead found B of size {shape_b} and C of size {shape_c}''' ) raise ValueError(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase__ = pseudo_inv if a_inv is None: try: UpperCAmelCase__ = np.linalg.inv(SCREAMING_SNAKE_CASE__ ) except np.linalg.LinAlgError: raise ValueError( """Input matrix A is not invertible. 
Cannot compute Schur complement.""" ) return mat_c - mat_b.T @ a_inv @ mat_b class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" UpperCAmelCase__ = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) UpperCAmelCase__ = np.array([[0, 3], [3, 0], [2, 3]] ) UpperCAmelCase__ = np.array([[2, 1], [6, 3]] ) UpperCAmelCase__ = schur_complement(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = np.block([[a, b], [b.T, c]] ) UpperCAmelCase__ = np.linalg.det(_UpperCAmelCase ) UpperCAmelCase__ = np.linalg.det(_UpperCAmelCase ) UpperCAmelCase__ = np.linalg.det(_UpperCAmelCase ) self.assertAlmostEqual(_UpperCAmelCase , det_a * det_s ) def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) UpperCAmelCase__ = np.array([[0, 3], [3, 0], [2, 3]] ) UpperCAmelCase__ = np.array([[2, 1], [6, 3]] ) with self.assertRaises(_UpperCAmelCase ): schur_complement(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ): """simple docstring""" UpperCAmelCase__ = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) UpperCAmelCase__ = np.array([[0, 3], [3, 0], [2, 3]] ) UpperCAmelCase__ = np.array([[2, 1, 3], [6, 3, 5]] ) with self.assertRaises(_UpperCAmelCase ): schur_complement(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod() unittest.main()
603
0
from __future__ import annotations from collections import deque class UpperCamelCase : def __init__( self : Optional[Any] , snake_case__ : list[str] ): """simple docstring""" SCREAMING_SNAKE_CASE = [] self.adlist.append( {'value': '', 'next_states': [], 'fail_state': 0, 'output': []} ) for keyword in keywords: self.add_keyword(snake_case__ ) self.set_fail_transitions() def UpperCamelCase ( self : List[Any] , snake_case__ : int , snake_case__ : str ): """simple docstring""" for state in self.adlist[current_state]["next_states"]: if char == self.adlist[state]["value"]: return state return None def UpperCamelCase ( self : str , snake_case__ : str ): """simple docstring""" SCREAMING_SNAKE_CASE = 0 for character in keyword: SCREAMING_SNAKE_CASE = self.find_next_state(snake_case__ , snake_case__ ) if next_state is None: self.adlist.append( { 'value': character, 'next_states': [], 'fail_state': 0, 'output': [], } ) self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 ) SCREAMING_SNAKE_CASE = len(self.adlist ) - 1 else: SCREAMING_SNAKE_CASE = next_state self.adlist[current_state]["output"].append(snake_case__ ) def UpperCamelCase ( self : Union[str, Any] ): """simple docstring""" SCREAMING_SNAKE_CASE = deque() for node in self.adlist[0]["next_states"]: q.append(snake_case__ ) SCREAMING_SNAKE_CASE = 0 while q: SCREAMING_SNAKE_CASE = q.popleft() for child in self.adlist[r]["next_states"]: q.append(snake_case__ ) SCREAMING_SNAKE_CASE = self.adlist[r]['fail_state'] while ( self.find_next_state(snake_case__ , self.adlist[child]['value'] ) is None and state != 0 ): SCREAMING_SNAKE_CASE = self.adlist[state]['fail_state'] SCREAMING_SNAKE_CASE = self.find_next_state( snake_case__ , self.adlist[child]['value'] ) if self.adlist[child]["fail_state"] is None: SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = ( self.adlist[child]['output'] + self.adlist[self.adlist[child]['fail_state']]['output'] ) def UpperCamelCase ( self : List[str] , snake_case__ : str ): """simple 
docstring""" SCREAMING_SNAKE_CASE = {} # returns a dict with keywords and list of its occurrences SCREAMING_SNAKE_CASE = 0 for i in range(len(snake_case__ ) ): while ( self.find_next_state(snake_case__ , string[i] ) is None and current_state != 0 ): SCREAMING_SNAKE_CASE = self.adlist[current_state]['fail_state'] SCREAMING_SNAKE_CASE = self.find_next_state(snake_case__ , string[i] ) if next_state is None: SCREAMING_SNAKE_CASE = 0 else: SCREAMING_SNAKE_CASE = next_state for key in self.adlist[current_state]["output"]: if key not in result: SCREAMING_SNAKE_CASE = [] result[key].append(i - len(snake_case__ ) + 1 ) return result if __name__ == "__main__": import doctest doctest.testmod()
673
import gc
import unittest

import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
    ClapTextConfig,
    ClapTextModelWithProjection,
    RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)

from diffusers import (
    AudioLDMPipeline,
    AutoencoderKL,
    DDIMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for `AudioLDMPipeline`, built from tiny dummy components."""

    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        """Return a dict of tiny, deterministic model components."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=(32, 64),
            class_embed_type="simple_projection",
            projection_class_embeddings_input_dim=32,
            class_embeddings_concat=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=1,
            out_channels=1,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            projection_dim=32,
        )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained(
            "hf-internal-testing/tiny-random-roberta", model_max_length=77
        )
        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8,
            sampling_rate=16000,
            upsample_initial_channel=16,
            upsample_rates=[2, 2],
            upsample_kernel_sizes=[4, 4],
            resblock_kernel_sizes=[3, 7],
            resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]],
            normalize_before=False,
        )
        vocoder = SpeechT5HifiGan(vocoder_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs; the generator type is device-dependent."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs

    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )
        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward with a plain text prompt
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=audioldm_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(text_inputs)
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward with precomputed embeddings; result must match
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward with plain text prompts
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p,
                padding="max_length",
                max_length=audioldm_pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_inputs = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(text_inputs)
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward with precomputed embeddings; result must match
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )
        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios
        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios
        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(
            prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios
        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size,
            num_inference_steps=2,
            num_waveforms_per_prompt=num_waveforms_per_prompt,
        ).audios
        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)

    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032

    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)


@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    """Integration tests against the full `cvssp/audioldm` checkpoint."""

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        """Deterministic call kwargs with fixed latents for the full model."""
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs

    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2

    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[27780:27790]
        expected_slice = np.array(
            [-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
673
1
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union

import torch

from ..utils import BaseOutput


# File name under which every scheduler stores its configuration.
SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class KarrasDiffusionSchedulers(Enum):
    """Schedulers usable interchangeably in Karras-style diffusion pipelines.

    NOTE(review): member names restored from upstream diffusers — confirm
    the ordering against the library version this file targets.
    """

    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    EulerAncestralDiscreteScheduler = 6
    DPMSolverMultistepScheduler = 7
    DPMSolverSinglestepScheduler = 8
    KDPM2DiscreteScheduler = 9
    KDPM2AncestralDiscreteScheduler = 10
    DEISMultistepScheduler = 11
    UniPCMultistepScheduler = 12
    DPMSolverSDEScheduler = 13
    RePaintScheduler = 14


@dataclass
class SchedulerOutput(BaseOutput):
    """Base output class for a scheduler's ``step`` method.

    Attributes:
        prev_sample: the denoised sample ``x_{t-1}`` for the previous timestep.
    """

    prev_sample: torch.FloatTensor


class SchedulerMixin:
    """Mixin providing save/load helpers shared by all schedulers."""

    # Name of the JSON file the config is (de)serialized from.
    config_name = SCHEDULER_CONFIG_NAME
    # Scheduler class names this scheduler can be swapped with.
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        """Instantiate the scheduler from a saved configuration.

        Loads the config (optionally from ``subfolder``), then builds the
        scheduler via ``from_config``, forwarding any unused kwargs.
        """
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            return_commit_hash=True,
            **kwargs,
        )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        """Save the scheduler configuration to ``save_directory``."""
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        """Return all scheduler classes this scheduler is compatible with."""
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        # Resolve compatible class names (including this class) against the
        # top-level library namespace; silently skip unresolvable names.
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c)
            for c in compatible_classes_str
            if hasattr(diffusers_library, c)
        ]
        return compatible_classes
371
# flake8: noqa
# Lint as: python3
"""Registry mapping format types (e.g. 'numpy', 'torch') to Formatter classes."""
from typing import Dict, List, Optional, Type

from .. import config
from ..utils import logging
from .formatting import (
    ArrowFormatter,
    CustomFormatter,
    Formatter,
    PandasFormatter,
    PythonFormatter,
    TensorFormatter,
    format_table,
    query_table,
)
from .np_formatter import NumpyFormatter


logger = logging.get_logger(__name__)

# canonical format type -> Formatter class
_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
# user-facing alias -> canonical format type
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
# alias -> error raised when the backing library is not installed
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register a Formatter subclass under `format_type` and its aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register the error raised when an uninstalled backend's format is requested."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error


# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")

if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
    _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
    _register_unavailable_formatter(_jax_error, "jax", aliases=[])


def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """Resolve a user-facing alias (e.g. 'np') to its canonical format type."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Instantiate the Formatter registered for `format_type` (alias-aware).

    Raises:
        The registered unavailability error for known-but-uninstalled
        backends, and ValueError for unknown format types.
    """
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
371
1
"""simple docstring""" from .testing import ( are_the_same_tensors, execute_subprocess_async, require_bnb, require_cpu, require_cuda, require_huggingface_suite, require_mps, require_multi_gpu, require_multi_xpu, require_safetensors, require_single_gpu, require_single_xpu, require_torch_min_version, require_tpu, require_xpu, skip, slow, ) from .training import RegressionDataset, RegressionModel, RegressionModelaXPU from .scripts import test_script, test_sync, test_ops # isort: skip
215
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel
215
1
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int):
    """Build, simulate and measure a 2-qubit NOT circuit.

    Args:
        qubits: number of quantum bits in the circuit.
        classical_bits: number of classical bits to store the measurement.

    Returns:
        The histogram (counts) of measured states over 1000 simulator shots.
    """
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f"Total count for various states are: {counts}")
171
"""Two-input XNOR logic gate."""


def xnor_gate(input_1: int, input_2: int) -> int:
    """Return 1 when both inputs are equal, else 0 (XNOR truth table).

    >>> xnor_gate(0, 0)
    1
    >>> xnor_gate(0, 1)
    0
    """
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    """Exhaustively check the 2-input XNOR truth table."""
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1


if __name__ == "__main__":
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
394
0
"""Spearman rank-order correlation metric wrapping `scipy.stats.spearmanr`."""
from scipy.stats import spearmanr

import datasets


_DESCRIPTION = "\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n"

_KWARGS_DESCRIPTION = "\nArgs:\n    predictions (`List[float]`): Predicted labels, as returned by a model.\n    references (`List[float]`): Ground truth labels.\n    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n        only the spearmanr score. Defaults to `False`.\nReturns:\n    spearmanr (`float`): Spearman correlation coefficient.\n    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n    Example 1:\n        >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n        >>> print(results)\n        {'spearmanr': -0.7}\n\n    Example 2:\n        >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n        ...                                    predictions=[10, 9, 2.5, 6, 4],\n        ...                                    return_pvalue=True)\n        >>> print(results['spearmanr'])\n        -0.7\n        >>> print(round(results['spearmanr_pvalue'], 2))\n        0.19\n"

_CITATION = R"\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class a_(datasets.Metric):
    """Spearman rank-order correlation coefficient metric."""

    def _info(self):
        # Metric metadata: expects parallel float lists predictions/references.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        """Compute the Spearman correlation (and optionally its p-value)."""
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
252
import gc
import unittest

from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
    is_pipeline_test,
    is_torch_available,
    nested_simplify,
    require_tf,
    require_torch,
    require_torch_gpu,
    slow,
)

from .test_pipelines_common import ANY


@is_pipeline_test
class a_(unittest.TestCase):
    """Test suite for the fill-mask pipeline (PyTorch and TensorFlow backends).

    NOTE(review): this file looks mechanically renamed: every method is called
    `_lowerCAmelCase` (so later definitions shadow earlier ones), most locals are
    called `SCREAMING_SNAKE_CASE`, some signatures repeat a parameter name
    (a SyntaxError), and bodies reference names (`unmasker`, `outputs`,
    `tokenizer`, `fill_masker`, ...) that are never bound under those names.
    Code is kept byte-identical below; only documentation was added.
    """

    # NOTE(review): both assignments target the same name so the second wins;
    # presumably these were `model_mapping` / `tf_model_mapping` -- confirm upstream.
    __UpperCAmelCase = MODEL_FOR_MASKED_LM_MAPPING
    __UpperCAmelCase = TF_MODEL_FOR_MASKED_LM_MAPPING

    # Tear-down helper: frees as much GPU memory as possible between tests.
    def _lowerCAmelCase(self: Any):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()

    # Small-model test, TensorFlow backend: pins exact top-k outputs of a tiny model.
    @require_tf
    def _lowerCAmelCase(self: int):
        SCREAMING_SNAKE_CASE = pipeline(task='fill-mask', model='sshleifer/tiny-distilroberta-base', top_k=2, framework='tf')
        SCREAMING_SNAKE_CASE = unmasker('My name is <mask>')
        self.assertEqual(
            nested_simplify(snake_case, decimals=6),
            [
                {'sequence': 'My name is grouped', 'score': 2.1e-05, 'token': 38015, 'token_str': ' grouped'},
                {'sequence': 'My name is accuser', 'score': 2.1e-05, 'token': 25506, 'token_str': ' accuser'},
            ],
        )
        SCREAMING_SNAKE_CASE = unmasker('The largest city in France is <mask>')
        self.assertEqual(
            nested_simplify(snake_case, decimals=6),
            [
                {
                    'sequence': 'The largest city in France is grouped',
                    'score': 2.1e-05,
                    'token': 38015,
                    'token_str': ' grouped',
                },
                {
                    'sequence': 'The largest city in France is accuser',
                    'score': 2.1e-05,
                    'token': 25506,
                    'token_str': ' accuser',
                },
            ],
        )
        SCREAMING_SNAKE_CASE = unmasker('My name is <mask>', targets=[' Patrick', ' Clara', ' Teven'], top_k=3)
        self.assertEqual(
            nested_simplify(snake_case, decimals=6),
            [
                {'sequence': 'My name is Clara', 'score': 2e-05, 'token': 13606, 'token_str': ' Clara'},
                {'sequence': 'My name is Patrick', 'score': 2e-05, 'token': 3499, 'token_str': ' Patrick'},
                {'sequence': 'My name is Te', 'score': 1.9e-05, 'token': 2941, 'token_str': ' Te'},
            ],
        )

    # Small-model test, PyTorch backend, including a multi-mask input.
    @require_torch
    def _lowerCAmelCase(self: List[Any]):
        SCREAMING_SNAKE_CASE = pipeline(task='fill-mask', model='sshleifer/tiny-distilroberta-base', top_k=2, framework='pt')
        SCREAMING_SNAKE_CASE = unmasker('My name is <mask>')
        self.assertEqual(
            nested_simplify(snake_case, decimals=6),
            [
                {'sequence': 'My name is Maul', 'score': 2.2e-05, 'token': 35676, 'token_str': ' Maul'},
                {'sequence': 'My name isELS', 'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS'},
            ],
        )
        SCREAMING_SNAKE_CASE = unmasker('The largest city in France is <mask>')
        self.assertEqual(
            nested_simplify(snake_case, decimals=6),
            [
                {
                    'sequence': 'The largest city in France is Maul',
                    'score': 2.2e-05,
                    'token': 35676,
                    'token_str': ' Maul',
                },
                {'sequence': 'The largest city in France isELS', 'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS'},
            ],
        )
        SCREAMING_SNAKE_CASE = unmasker('My name is <mask>', targets=[' Patrick', ' Clara', ' Teven'], top_k=3)
        self.assertEqual(
            nested_simplify(snake_case, decimals=6),
            [
                {'sequence': 'My name is Patrick', 'score': 2.1e-05, 'token': 3499, 'token_str': ' Patrick'},
                {'sequence': 'My name is Te', 'score': 2e-05, 'token': 2941, 'token_str': ' Te'},
                {'sequence': 'My name is Clara', 'score': 2e-05, 'token': 13606, 'token_str': ' Clara'},
            ],
        )
        SCREAMING_SNAKE_CASE = unmasker('My name is <mask> <mask>', top_k=2)
        self.assertEqual(
            nested_simplify(snake_case, decimals=6),
            [
                [
                    {
                        'score': 2.2e-05,
                        'token': 35676,
                        'token_str': ' Maul',
                        'sequence': '<s>My name is Maul<mask></s>',
                    },
                    {'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS', 'sequence': '<s>My name isELS<mask></s>'},
                ],
                [
                    {
                        'score': 2.2e-05,
                        'token': 35676,
                        'token_str': ' Maul',
                        'sequence': '<s>My name is<mask> Maul</s>',
                    },
                    {'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS', 'sequence': '<s>My name is<mask>ELS</s>'},
                ],
            ],
        )

    # fp16 smoke test: only checks that float16 outputs survive post-processing.
    @require_torch_gpu
    def _lowerCAmelCase(self: Any):
        SCREAMING_SNAKE_CASE = pipeline('fill-mask', model='hf-internal-testing/tiny-random-distilbert', device=0, framework='pt')
        # convert model to fp16
        pipe.model.half()
        SCREAMING_SNAKE_CASE = pipe('Paris is the [MASK] of France.')
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(snake_case, snake_case)

    @slow
    @require_torch
    def _lowerCAmelCase(self: List[Any]):
        SCREAMING_SNAKE_CASE = pipeline(task='fill-mask', model='distilroberta-base', top_k=2, framework='pt')
        self.run_large_test(snake_case)

    @slow
    @require_tf
    def _lowerCAmelCase(self: List[str]):
        SCREAMING_SNAKE_CASE = pipeline(task='fill-mask', model='distilroberta-base', top_k=2, framework='tf')
        self.run_large_test(snake_case)

    # Shared body for the two @slow tests above: checks real distilroberta outputs.
    def _lowerCAmelCase(self: List[Any], snake_case: List[str]):
        SCREAMING_SNAKE_CASE = unmasker('My name is <mask>')
        self.assertEqual(
            nested_simplify(snake_case),
            [
                {'sequence': 'My name is John', 'score': 0.008, 'token': 610, 'token_str': ' John'},
                {'sequence': 'My name is Chris', 'score': 0.007, 'token': 1573, 'token_str': ' Chris'},
            ],
        )
        SCREAMING_SNAKE_CASE = unmasker('The largest city in France is <mask>')
        self.assertEqual(
            nested_simplify(snake_case),
            [
                {
                    'sequence': 'The largest city in France is Paris',
                    'score': 0.251,
                    'token': 2201,
                    'token_str': ' Paris',
                },
                {
                    'sequence': 'The largest city in France is Lyon',
                    'score': 0.214,
                    'token': 12790,
                    'token_str': ' Lyon',
                },
            ],
        )
        SCREAMING_SNAKE_CASE = unmasker('My name is <mask>', targets=[' Patrick', ' Clara', ' Teven'], top_k=3)
        self.assertEqual(
            nested_simplify(snake_case),
            [
                {'sequence': 'My name is Patrick', 'score': 0.005, 'token': 3499, 'token_str': ' Patrick'},
                {'sequence': 'My name is Clara', 'score': 0.000, 'token': 13606, 'token_str': ' Clara'},
                {'sequence': 'My name is Te', 'score': 0.000, 'token': 2941, 'token_str': ' Te'},
            ],
        )

    # Pipeline-suite entry points without padding, one per framework.
    @require_torch
    def _lowerCAmelCase(self: Union[str, Any]):
        SCREAMING_SNAKE_CASE = pipeline(task='fill-mask', model='sshleifer/tiny-distilroberta-base', framework='pt')
        SCREAMING_SNAKE_CASE = None
        SCREAMING_SNAKE_CASE = None
        self.run_pipeline_test(snake_case, [])

    @require_tf
    def _lowerCAmelCase(self: int):
        SCREAMING_SNAKE_CASE = pipeline(task='fill-mask', model='sshleifer/tiny-distilroberta-base', framework='tf')
        SCREAMING_SNAKE_CASE = None
        SCREAMING_SNAKE_CASE = None
        self.run_pipeline_test(snake_case, [])

    # Builds a FillMaskPipeline + example inputs for the generic pipeline suite.
    # NOTE(review): the three parameters below share one name -- a SyntaxError and
    # almost certainly renaming damage (model / tokenizer / processor?).
    def _lowerCAmelCase(self: Tuple, snake_case: Dict, snake_case: Optional[Any], snake_case: Dict):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest('The provided tokenizer has no mask token, (probably reformer or wav2vec2)')
        SCREAMING_SNAKE_CASE = FillMaskPipeline(model=snake_case, tokenizer=snake_case)
        SCREAMING_SNAKE_CASE = [
            f'This is another {tokenizer.mask_token} test',
        ]
        return fill_masker, examples

    # Generic behaviour checks (output shape, error cases) for any fill-mask pipeline.
    def _lowerCAmelCase(self: int, snake_case: Optional[Any], snake_case: str):
        SCREAMING_SNAKE_CASE = fill_masker.tokenizer
        SCREAMING_SNAKE_CASE = fill_masker.model
        SCREAMING_SNAKE_CASE = fill_masker(f'This is a {tokenizer.mask_token}',)
        self.assertEqual(
            snake_case,
            [
                {'sequence': ANY(snake_case), 'score': ANY(snake_case), 'token': ANY(snake_case), 'token_str': ANY(snake_case)},
                {'sequence': ANY(snake_case), 'score': ANY(snake_case), 'token': ANY(snake_case), 'token_str': ANY(snake_case)},
                {'sequence': ANY(snake_case), 'score': ANY(snake_case), 'token': ANY(snake_case), 'token_str': ANY(snake_case)},
                {'sequence': ANY(snake_case), 'score': ANY(snake_case), 'token': ANY(snake_case), 'token_str': ANY(snake_case)},
                {'sequence': ANY(snake_case), 'score': ANY(snake_case), 'token': ANY(snake_case), 'token_str': ANY(snake_case)},
            ],
        )
        SCREAMING_SNAKE_CASE = fill_masker([f'This is a {tokenizer.mask_token}'])
        self.assertEqual(
            snake_case,
            [
                {'sequence': ANY(snake_case), 'score': ANY(snake_case), 'token': ANY(snake_case), 'token_str': ANY(snake_case)},
                {'sequence': ANY(snake_case), 'score': ANY(snake_case), 'token': ANY(snake_case), 'token_str': ANY(snake_case)},
                {'sequence': ANY(snake_case), 'score': ANY(snake_case), 'token': ANY(snake_case), 'token_str': ANY(snake_case)},
                {'sequence': ANY(snake_case), 'score': ANY(snake_case), 'token': ANY(snake_case), 'token_str': ANY(snake_case)},
                {'sequence': ANY(snake_case), 'score': ANY(snake_case), 'token': ANY(snake_case), 'token_str': ANY(snake_case)},
            ],
        )
        SCREAMING_SNAKE_CASE = fill_masker([f'This is a {tokenizer.mask_token}', f'Another {tokenizer.mask_token} great test.'])
        self.assertEqual(
            snake_case,
            [
                [
                    {'sequence': ANY(snake_case), 'score': ANY(snake_case), 'token': ANY(snake_case), 'token_str': ANY(snake_case)},
                    {'sequence': ANY(snake_case), 'score': ANY(snake_case), 'token': ANY(snake_case), 'token_str': ANY(snake_case)},
                    {'sequence': ANY(snake_case), 'score': ANY(snake_case), 'token': ANY(snake_case), 'token_str': ANY(snake_case)},
                    {'sequence': ANY(snake_case), 'score': ANY(snake_case), 'token': ANY(snake_case), 'token_str': ANY(snake_case)},
                    {'sequence': ANY(snake_case), 'score': ANY(snake_case), 'token': ANY(snake_case), 'token_str': ANY(snake_case)},
                ],
                [
                    {'sequence': ANY(snake_case), 'score': ANY(snake_case), 'token': ANY(snake_case), 'token_str': ANY(snake_case)},
                    {'sequence': ANY(snake_case), 'score': ANY(snake_case), 'token': ANY(snake_case), 'token_str': ANY(snake_case)},
                    {'sequence': ANY(snake_case), 'score': ANY(snake_case), 'token': ANY(snake_case), 'token_str': ANY(snake_case)},
                    {'sequence': ANY(snake_case), 'score': ANY(snake_case), 'token': ANY(snake_case), 'token_str': ANY(snake_case)},
                    {'sequence': ANY(snake_case), 'score': ANY(snake_case), 'token': ANY(snake_case), 'token_str': ANY(snake_case)},
                ],
            ],
        )
        with self.assertRaises(snake_case):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(snake_case):
            fill_masker('This is')
        self.run_test_top_k(snake_case, snake_case)
        self.run_test_targets(snake_case, snake_case)
        self.run_test_top_k_targets(snake_case, snake_case)
        self.fill_mask_with_duplicate_targets_and_top_k(snake_case, snake_case)
        self.fill_mask_with_multiple_masks(snake_case, snake_case)

    # Checks `targets=` both as pipeline argument and as call argument.
    def _lowerCAmelCase(self: Union[str, Any], snake_case: Optional[Any], snake_case: List[str]):
        SCREAMING_SNAKE_CASE = tokenizer.get_vocab()
        SCREAMING_SNAKE_CASE = sorted(vocab.keys())[:2]
        # Pipeline argument
        SCREAMING_SNAKE_CASE = FillMaskPipeline(model=snake_case, tokenizer=snake_case, targets=snake_case)
        SCREAMING_SNAKE_CASE = fill_masker(f'This is a {tokenizer.mask_token}')
        self.assertEqual(
            snake_case,
            [
                {'sequence': ANY(snake_case), 'score': ANY(snake_case), 'token': ANY(snake_case), 'token_str': ANY(snake_case)},
                {'sequence': ANY(snake_case), 'score': ANY(snake_case), 'token': ANY(snake_case), 'token_str': ANY(snake_case)},
            ],
        )
        SCREAMING_SNAKE_CASE = {vocab[el] for el in targets}
        self.assertEqual({el['token'] for el in outputs}, snake_case)
        SCREAMING_SNAKE_CASE = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el['token_str'] for el in outputs}, set(snake_case))
        # Call argument
        SCREAMING_SNAKE_CASE = FillMaskPipeline(model=snake_case, tokenizer=snake_case)
        SCREAMING_SNAKE_CASE = fill_masker(f'This is a {tokenizer.mask_token}', targets=snake_case)
        self.assertEqual(
            snake_case,
            [
                {'sequence': ANY(snake_case), 'score': ANY(snake_case), 'token': ANY(snake_case), 'token_str': ANY(snake_case)},
                {'sequence': ANY(snake_case), 'score': ANY(snake_case), 'token': ANY(snake_case), 'token_str': ANY(snake_case)},
            ],
        )
        SCREAMING_SNAKE_CASE = {vocab[el] for el in targets}
        self.assertEqual({el['token'] for el in outputs}, snake_case)
        SCREAMING_SNAKE_CASE = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el['token_str'] for el in outputs}, set(snake_case))
        # Score equivalence
        SCREAMING_SNAKE_CASE = fill_masker(f'This is a {tokenizer.mask_token}', targets=snake_case)
        SCREAMING_SNAKE_CASE = [top_mask['token_str'] for top_mask in outputs]
        SCREAMING_SNAKE_CASE = [top_mask['score'] for top_mask in outputs]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(snake_case) == set(snake_case):
            SCREAMING_SNAKE_CASE = fill_masker(f'This is a {tokenizer.mask_token}', targets=snake_case)
            SCREAMING_SNAKE_CASE = [top_mask['score'] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(snake_case), nested_simplify(snake_case))
        # Raises with invalid
        with self.assertRaises(snake_case):
            SCREAMING_SNAKE_CASE = fill_masker(f'This is a {tokenizer.mask_token}', targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(snake_case):
                SCREAMING_SNAKE_CASE = fill_masker(f'This is a {tokenizer.mask_token}', targets=[''])
        with self.assertRaises(snake_case):
            SCREAMING_SNAKE_CASE = fill_masker(f'This is a {tokenizer.mask_token}', targets='')

    # Checks `top_k=` as pipeline argument vs call argument yields identical results.
    def _lowerCAmelCase(self: List[Any], snake_case: Dict, snake_case: List[str]):
        SCREAMING_SNAKE_CASE = FillMaskPipeline(model=snake_case, tokenizer=snake_case, top_k=2)
        SCREAMING_SNAKE_CASE = fill_masker(f'This is a {tokenizer.mask_token}')
        self.assertEqual(
            snake_case,
            [
                {'sequence': ANY(snake_case), 'score': ANY(snake_case), 'token': ANY(snake_case), 'token_str': ANY(snake_case)},
                {'sequence': ANY(snake_case), 'score': ANY(snake_case), 'token': ANY(snake_case), 'token_str': ANY(snake_case)},
            ],
        )
        SCREAMING_SNAKE_CASE = FillMaskPipeline(model=snake_case, tokenizer=snake_case)
        SCREAMING_SNAKE_CASE = fill_masker(f'This is a {tokenizer.mask_token}', top_k=2)
        self.assertEqual(
            snake_case,
            [
                {'sequence': ANY(snake_case), 'score': ANY(snake_case), 'token': ANY(snake_case), 'token_str': ANY(snake_case)},
                {'sequence': ANY(snake_case), 'score': ANY(snake_case), 'token': ANY(snake_case), 'token_str': ANY(snake_case)},
            ],
        )
        self.assertEqual(nested_simplify(snake_case), nested_simplify(snake_case))

    # Interaction of top_k with targets filtering.
    def _lowerCAmelCase(self: Optional[Any], snake_case: List[str], snake_case: Tuple):
        SCREAMING_SNAKE_CASE = tokenizer.get_vocab()
        SCREAMING_SNAKE_CASE = FillMaskPipeline(model=snake_case, tokenizer=snake_case)
        # top_k=2, ntargets=3
        SCREAMING_SNAKE_CASE = sorted(vocab.keys())[:3]
        SCREAMING_SNAKE_CASE = fill_masker(f'This is a {tokenizer.mask_token}', top_k=2, targets=snake_case)
        # If we use the most probably targets, and filter differently, we should still
        # have the same results
        SCREAMING_SNAKE_CASE = [el['token_str'] for el in sorted(snake_case, key=lambda snake_case: x["score"], reverse=snake_case)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(snake_case).issubset(snake_case):
            SCREAMING_SNAKE_CASE = fill_masker(f'This is a {tokenizer.mask_token}', top_k=3, targets=snake_case)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(snake_case), nested_simplify(snake_case))

    # Duplicate targets must be deduplicated in the output.
    def _lowerCAmelCase(self: Any, snake_case: str, snake_case: Dict):
        SCREAMING_SNAKE_CASE = FillMaskPipeline(model=snake_case, tokenizer=snake_case)
        SCREAMING_SNAKE_CASE = tokenizer.get_vocab()
        # String duplicates + id duplicates
        SCREAMING_SNAKE_CASE = sorted(vocab.keys())[:3]
        SCREAMING_SNAKE_CASE = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        SCREAMING_SNAKE_CASE = fill_masker(f'My name is {tokenizer.mask_token}', targets=snake_case, top_k=10)
        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(snake_case), 3)

    # Multiple masks in one input produce one result list per mask.
    def _lowerCAmelCase(self: Union[str, Any], snake_case: Dict, snake_case: Any):
        SCREAMING_SNAKE_CASE = FillMaskPipeline(model=snake_case, tokenizer=snake_case)
        SCREAMING_SNAKE_CASE = fill_masker(
            f'This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}', top_k=2
        )
        self.assertEqual(
            snake_case,
            [
                [
                    {'sequence': ANY(snake_case), 'score': ANY(snake_case), 'token': ANY(snake_case), 'token_str': ANY(snake_case)},
                    {'sequence': ANY(snake_case), 'score': ANY(snake_case), 'token': ANY(snake_case), 'token_str': ANY(snake_case)},
                ],
                [
                    {'sequence': ANY(snake_case), 'score': ANY(snake_case), 'token': ANY(snake_case), 'token_str': ANY(snake_case)},
                    {'sequence': ANY(snake_case), 'score': ANY(snake_case), 'token': ANY(snake_case), 'token_str': ANY(snake_case)},
                ],
                [
                    {'sequence': ANY(snake_case), 'score': ANY(snake_case), 'token': ANY(snake_case), 'token_str': ANY(snake_case)},
                    {'sequence': ANY(snake_case), 'score': ANY(snake_case), 'token': ANY(snake_case), 'token_str': ANY(snake_case)},
                ],
            ],
        )
252
1
"""Pigeonhole sort for integer sequences.

Fix: the function was defined under the mangled name ``UpperCamelCase`` while
the ``__main__`` block called the undefined names ``pigeon_sort`` and
``unsorted`` (NameError at runtime). The function is restored under the name
the script actually uses, with a backward-compatible alias for the old name.
"""
from __future__ import annotations


def pigeon_sort(array: list[int]) -> list[int]:
    """Sort a list of integers in place using pigeonhole sort and return it.

    Suitable when the value range (max - min) is small relative to the length.
    Handles negative numbers and the empty list.

    >>> pigeon_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> pigeon_sort([])
    []
    >>> pigeon_sort([-2, -5, -45])
    [-45, -5, -2]
    """
    if len(array) == 0:
        return array

    _min, _max = min(array), max(array)

    # One hole per possible value in [_min, _max].
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Drop each element into its hole, counting repeats.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Write the values back into `array` in ascending hole order.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    return array


# Backward-compatible alias for the previous (auto-mangled) public name.
UpperCamelCase = pigeon_sort


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
    print(pigeon_sort(unsorted))
440
"""Benchmark iteration speed over a ``datasets.Dataset`` under several access patterns.

Fixes: the four timed helpers were all defined under the single mangled name
``UpperCamelCase`` with duplicate parameter names (a SyntaxError), while the
benchmark body referenced ``read`` / ``read_batch`` / ``read_formatted`` /
``read_formatted_batch``; the module constants the code reads
(``SPEED_TEST_N_EXAMPLES``, ``SMALL_TEST``, ``RESULTS_FILE_PATH``) were never
bound; and the second-phase banner string was missing its closing parenthesis.
"""
import json
import os
import tempfile

import datasets

from utils import generate_example_dataset, get_duration

# Number of examples used for the full-speed runs and the smaller formatted runs.
SPEED_TEST_N_EXAMPLES = 50000
SMALL_TEST = 5000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def read(dataset: datasets.Dataset, length: int):
    """Read `length` examples one by one (python format)."""
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length: int, batch_size: int):
    """Read the whole dataset in slices of `batch_size`."""
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length: int, type: str):
    """Read `length` examples one by one under an output format (numpy/pandas/...)."""
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length: int, batch_size: int, type: str):
    """Read `length` examples in slices of `batch_size` under an output format."""
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    """Generate a synthetic dataset, time all access patterns, dump results to JSON."""
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    # Second pass runs on a shuffled dataset (non-contiguous reads).
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
440
1
"""simple docstring""" import warnings from ...utils import logging from .image_processing_yolos import YolosImageProcessor A : str = logging.get_logger(__name__) class _UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' def __init__( self , *__a , **__a ): warnings.warn( "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use YolosImageProcessor instead." , __a , ) super().__init__(*__a , **__a )
706
"""simple docstring""" import tensorflow as tf from ...tf_utils import shape_list class _UpperCamelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , __a , __a , __a , __a , __a=1 , __a=False , **__a ): super().__init__(**__a ) __lowerCAmelCase = vocab_size __lowerCAmelCase = d_embed __lowerCAmelCase = d_proj __lowerCAmelCase = cutoffs + [vocab_size] __lowerCAmelCase = [0] + self.cutoffs __lowerCAmelCase = div_val __lowerCAmelCase = self.cutoffs[0] __lowerCAmelCase = len(self.cutoffs ) - 1 __lowerCAmelCase = self.shortlist_size + self.n_clusters __lowerCAmelCase = keep_order __lowerCAmelCase = [] __lowerCAmelCase = [] def snake_case ( self , __a ): if self.n_clusters > 0: __lowerCAmelCase = self.add_weight( shape=(self.n_clusters, self.d_embed) , initializer="zeros" , trainable=__a , name="cluster_weight" ) __lowerCAmelCase = self.add_weight( shape=(self.n_clusters,) , initializer="zeros" , trainable=__a , name="cluster_bias" ) if self.div_val == 1: for i in range(len(self.cutoffs ) ): if self.d_proj != self.d_embed: __lowerCAmelCase = self.add_weight( shape=(self.d_embed, self.d_proj) , initializer="zeros" , trainable=__a , name=f"out_projs_._{i}" , ) self.out_projs.append(__a ) else: self.out_projs.append(__a ) __lowerCAmelCase = self.add_weight( shape=(self.vocab_size, self.d_embed) , initializer="zeros" , trainable=__a , name=f"out_layers_._{i}_._weight" , ) __lowerCAmelCase = self.add_weight( shape=(self.vocab_size,) , initializer="zeros" , trainable=__a , name=f"out_layers_._{i}_._bias" , ) self.out_layers.append((weight, bias) ) else: for i in range(len(self.cutoffs ) ): __lowerCAmelCase , __lowerCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1] __lowerCAmelCase = self.d_embed // (self.div_val**i) __lowerCAmelCase = self.add_weight( shape=(d_emb_i, self.d_proj) , initializer="zeros" , trainable=__a , name=f"out_projs_._{i}" ) self.out_projs.append(__a ) __lowerCAmelCase = self.add_weight( shape=(r_idx - l_idx, d_emb_i) , 
initializer="zeros" , trainable=__a , name=f"out_layers_._{i}_._weight" , ) __lowerCAmelCase = self.add_weight( shape=(r_idx - l_idx,) , initializer="zeros" , trainable=__a , name=f"out_layers_._{i}_._bias" , ) self.out_layers.append((weight, bias) ) super().build(__a ) @staticmethod def snake_case ( __a , __a , __a , __a=None ): __lowerCAmelCase = x if proj is not None: __lowerCAmelCase = tf.einsum("ibd,ed->ibe" , __a , __a ) return tf.einsum("ibd,nd->ibn" , __a , __a ) + b @staticmethod def snake_case ( __a , __a ): __lowerCAmelCase = shape_list(__a ) __lowerCAmelCase = tf.range(lp_size[0] , dtype=target.dtype ) __lowerCAmelCase = tf.stack([r, target] , 1 ) return tf.gather_nd(__a , __a ) def snake_case ( self , __a , __a , __a=True , __a=False ): __lowerCAmelCase = 0 if self.n_clusters == 0: __lowerCAmelCase = self._logit(__a , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] ) if target is not None: __lowerCAmelCase = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=__a , logits=__a ) __lowerCAmelCase = tf.nn.log_softmax(__a , axis=-1 ) else: __lowerCAmelCase = shape_list(__a ) __lowerCAmelCase = [] __lowerCAmelCase = tf.zeros(hidden_sizes[:2] ) for i in range(len(self.cutoffs ) ): __lowerCAmelCase , __lowerCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1] if target is not None: __lowerCAmelCase = (target >= l_idx) & (target < r_idx) __lowerCAmelCase = tf.where(__a ) __lowerCAmelCase = tf.boolean_mask(__a , __a ) - l_idx if self.div_val == 1: __lowerCAmelCase = self.out_layers[0][0][l_idx:r_idx] __lowerCAmelCase = self.out_layers[0][1][l_idx:r_idx] else: __lowerCAmelCase = self.out_layers[i][0] __lowerCAmelCase = self.out_layers[i][1] if i == 0: __lowerCAmelCase = tf.concat([cur_W, self.cluster_weight] , 0 ) __lowerCAmelCase = tf.concat([cur_b, self.cluster_bias] , 0 ) __lowerCAmelCase = self._logit(__a , __a , __a , self.out_projs[0] ) __lowerCAmelCase = tf.nn.log_softmax(__a ) out.append(head_logprob[..., : self.cutoffs[0]] ) if 
target is not None: __lowerCAmelCase = tf.boolean_mask(__a , __a ) __lowerCAmelCase = self._gather_logprob(__a , __a ) else: __lowerCAmelCase = self._logit(__a , __a , __a , self.out_projs[i] ) __lowerCAmelCase = tf.nn.log_softmax(__a ) __lowerCAmelCase = self.cutoffs[0] + i - 1 # No probability for the head cluster __lowerCAmelCase = head_logprob[..., cluster_prob_idx, None] + tail_logprob out.append(__a ) if target is not None: __lowerCAmelCase = tf.boolean_mask(__a , __a ) __lowerCAmelCase = tf.boolean_mask(__a , __a ) __lowerCAmelCase = self._gather_logprob(__a , __a ) cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1] if target is not None: loss += tf.scatter_nd(__a , -cur_logprob , shape_list(__a ) ) __lowerCAmelCase = tf.concat(__a , axis=-1 ) if target is not None: if return_mean: __lowerCAmelCase = tf.reduce_mean(__a ) # Add the training-time loss value to the layer using `self.add_loss()`. self.add_loss(__a ) # Log the loss as a metric (we could log arbitrary metrics, # including different metrics for training and inference. self.add_metric(__a , name=self.name , aggregation="mean" if return_mean else "" ) return out
282
0
"""Lint-style integration checks over every dataset script under ./datasets."""
import re
from pathlib import Path
from unittest import TestCase

import pytest


@pytest.mark.integration
class _SCREAMING_SNAKE_CASE(TestCase):
    """Scans dataset scripts for `open()` calls without encoding and stray prints.

    Fixes: the base class was the undefined name `A` (the file imports
    `TestCase`), and all four methods were defined under the single name
    `__snake_case`, overwriting each other, while the test bodies call
    `self._no_encoding_on_file_open` / `self._no_print_statements`. The method
    names the code itself references are restored.
    """

    def _no_encoding_on_file_open(self, filepath: str):
        """Return a regex match for an `open(...)` call lacking an explicit
        encoding or binary mode in the given file, else None."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"""(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)""")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        """Return the first real `print(` call in the file (ignoring prints that
        only appear inside comments or string literals), else None."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"""#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()""", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
643
import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Tuple = { 'b0': efficientnet.EfficientNetBa, 'b1': efficientnet.EfficientNetBa, 'b2': efficientnet.EfficientNetBa, 'b3': efficientnet.EfficientNetBa, 'b4': efficientnet.EfficientNetBa, 'b5': efficientnet.EfficientNetBa, 'b6': efficientnet.EfficientNetBa, 'b7': efficientnet.EfficientNetBa, } SCREAMING_SNAKE_CASE__ : Optional[int] = { 'b0': { 'hidden_dim': 1_280, 'width_coef': 1.0, 'depth_coef': 1.0, 'image_size': 224, 'dropout_rate': 0.2, 'dw_padding': [], }, 'b1': { 'hidden_dim': 1_280, 'width_coef': 1.0, 'depth_coef': 1.1, 'image_size': 240, 'dropout_rate': 0.2, 'dw_padding': [16], }, 'b2': { 'hidden_dim': 1_408, 'width_coef': 1.1, 'depth_coef': 1.2, 'image_size': 260, 'dropout_rate': 0.3, 'dw_padding': [5, 8, 16], }, 'b3': { 'hidden_dim': 1_536, 'width_coef': 1.2, 'depth_coef': 1.4, 'image_size': 300, 'dropout_rate': 0.3, 'dw_padding': [5, 18], }, 'b4': { 'hidden_dim': 1_792, 'width_coef': 1.4, 'depth_coef': 1.8, 'image_size': 380, 'dropout_rate': 0.4, 'dw_padding': [6], }, 'b5': { 'hidden_dim': 2_048, 'width_coef': 1.6, 'depth_coef': 2.2, 'image_size': 456, 'dropout_rate': 0.4, 'dw_padding': [13, 27], }, 'b6': { 'hidden_dim': 2_304, 'width_coef': 1.8, 'depth_coef': 2.6, 'image_size': 528, 'dropout_rate': 0.5, 'dw_padding': [31], }, 'b7': { 'hidden_dim': 2_560, 'width_coef': 2.0, 'depth_coef': 3.1, 'image_size': 600, 'dropout_rate': 0.5, 'dw_padding': [18], }, } def a__ ( snake_case__ : Optional[Any] ): _UpperCAmelCase 
: Optional[int] = EfficientNetConfig() _UpperCAmelCase : Tuple = CONFIG_MAP[model_name]["""hidden_dim"""] _UpperCAmelCase : Any = CONFIG_MAP[model_name]["""width_coef"""] _UpperCAmelCase : Optional[Any] = CONFIG_MAP[model_name]["""depth_coef"""] _UpperCAmelCase : int = CONFIG_MAP[model_name]["""image_size"""] _UpperCAmelCase : List[str] = CONFIG_MAP[model_name]["""dropout_rate"""] _UpperCAmelCase : Optional[int] = CONFIG_MAP[model_name]["""dw_padding"""] _UpperCAmelCase : int = """huggingface/label-files""" _UpperCAmelCase : Dict = """imagenet-1k-id2label.json""" _UpperCAmelCase : Optional[Any] = 1000 _UpperCAmelCase : List[str] = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="""dataset""" ) , """r""" ) ) _UpperCAmelCase : Union[str, Any] = {int(snake_case__ ): v for k, v in idalabel.items()} _UpperCAmelCase : Any = idalabel _UpperCAmelCase : List[str] = {v: k for k, v in idalabel.items()} return config def a__ ( ): _UpperCAmelCase : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg""" _UpperCAmelCase : str = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ) return im def a__ ( snake_case__ : Tuple ): _UpperCAmelCase : List[str] = CONFIG_MAP[model_name]["""image_size"""] _UpperCAmelCase : str = EfficientNetImageProcessor( size={"""height""": size, """width""": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.4785_3944, 0.473_2864, 0.4743_4163] , do_center_crop=snake_case__ , ) return preprocessor def a__ ( snake_case__ : Optional[Any] ): _UpperCAmelCase : Optional[int] = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )] _UpperCAmelCase : Dict = sorted(set(snake_case__ ) ) _UpperCAmelCase : int = len(snake_case__ ) _UpperCAmelCase : Optional[Any] = {b: str(snake_case__ ) for b, i in zip(snake_case__ , range(snake_case__ ) )} _UpperCAmelCase : Union[str, Any] = [] rename_keys.append(("""stem_conv/kernel:0""", 
"""embeddings.convolution.weight""") ) rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") ) rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") ) rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") ) rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") ) for b in block_names: _UpperCAmelCase : List[Any] = block_name_mapping[b] rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((f'''block{b}_se_expand/kernel:0''', 
f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") ) rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") ) rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") ) rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") ) rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") ) _UpperCAmelCase : Union[str, Any] = {} for item in rename_keys: if item[0] in original_param_names: _UpperCAmelCase : Tuple = """efficientnet.""" + item[1] _UpperCAmelCase : List[Any] = """classifier.weight""" _UpperCAmelCase : List[Any] = """classifier.bias""" return key_mapping def a__ ( snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : List[str] ): for key, value in tf_params.items(): if "normalization" in key: continue _UpperCAmelCase : Optional[Any] = key_mapping[key] if "_conv" in key and "kernel" in key: _UpperCAmelCase : Optional[Any] = torch.from_numpy(snake_case__ ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: _UpperCAmelCase : Dict = torch.from_numpy(snake_case__ ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: _UpperCAmelCase : Any = 
torch.from_numpy(np.transpose(snake_case__ ) ) else: _UpperCAmelCase : Optional[Any] = torch.from_numpy(snake_case__ ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(snake_case__ ) @torch.no_grad() def a__ ( snake_case__ : str , snake_case__ : Dict , snake_case__ : int , snake_case__ : str ): _UpperCAmelCase : List[str] = model_classes[model_name]( include_top=snake_case__ , weights="""imagenet""" , input_tensor=snake_case__ , input_shape=snake_case__ , pooling=snake_case__ , classes=1000 , classifier_activation="""softmax""" , ) _UpperCAmelCase : Any = original_model.trainable_variables _UpperCAmelCase : List[Any] = original_model.non_trainable_variables _UpperCAmelCase : List[Any] = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: _UpperCAmelCase : List[str] = param.numpy() _UpperCAmelCase : List[Any] = list(tf_params.keys() ) # Load HuggingFace model _UpperCAmelCase : Any = get_efficientnet_config(snake_case__ ) _UpperCAmelCase : List[str] = EfficientNetForImageClassification(snake_case__ ).eval() _UpperCAmelCase : Any = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("""Converting parameters...""" ) _UpperCAmelCase : Optional[Any] = rename_keys(snake_case__ ) replace_params(snake_case__ , snake_case__ , snake_case__ ) # Initialize preprocessor and preprocess input image _UpperCAmelCase : Dict = convert_image_processor(snake_case__ ) _UpperCAmelCase : List[Any] = preprocessor(images=prepare_img() , return_tensors="""pt""" ) # HF model inference hf_model.eval() with torch.no_grad(): _UpperCAmelCase : List[str] = hf_model(**snake_case__ ) _UpperCAmelCase : Any = outputs.logits.detach().numpy() # Original model inference _UpperCAmelCase : Dict = False _UpperCAmelCase : List[Any] = CONFIG_MAP[model_name]["""image_size"""] _UpperCAmelCase : Any = prepare_img().resize((image_size, image_size) , 
resample=PIL.Image.NEAREST ) _UpperCAmelCase : Optional[Any] = image.img_to_array(snake_case__ ) _UpperCAmelCase : Any = np.expand_dims(snake_case__ , axis=0 ) _UpperCAmelCase : List[Any] = original_model.predict(snake_case__ ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(snake_case__ , snake_case__ , atol=1e-3 ), "The predicted logits are not the same." print("""Model outputs match!""" ) if save_model: # Create folder to save model if not os.path.isdir(snake_case__ ): os.mkdir(snake_case__ ) # Save converted model and image processor hf_model.save_pretrained(snake_case__ ) preprocessor.save_pretrained(snake_case__ ) if push_to_hub: # Push model and image processor to hub print(f'''Pushing converted {model_name} to the hub...''' ) _UpperCAmelCase : int = f'''efficientnet-{model_name}''' preprocessor.push_to_hub(snake_case__ ) hf_model.push_to_hub(snake_case__ ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='b0', type=str, help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].', ) parser.add_argument( '--pytorch_dump_folder_path', default='hf_model', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--save_model', action='store_true', help='Save model to local') parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') SCREAMING_SNAKE_CASE__ : Optional[Any] = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
643
1
"""simple docstring""" import argparse import json import os import fairseq import torch from torch import nn from transformers import ( SpeechaTextaConfig, SpeechaTextaForCausalLM, SpeechaTextaTokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() __magic_name__ = logging.get_logger(__name__) __magic_name__ = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } __magic_name__ = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", ] def _A ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ): """simple docstring""" for attribute in key.split(""".""" ): lowerCamelCase__ = getattr(__lowercase , __lowercase ) if weight_type is not None: lowerCamelCase__ = getattr(__lowercase , __lowercase ).shape else: lowerCamelCase__ = hf_pointer.shape assert hf_shape == value.shape, ( f"""Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": lowerCamelCase__ = value elif weight_type == "weight_g": lowerCamelCase__ = value elif weight_type == "weight_v": lowerCamelCase__ = value elif weight_type == "bias": lowerCamelCase__ = value else: lowerCamelCase__ = value logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def _A ( __lowercase , __lowercase ): """simple docstring""" lowerCamelCase__ = [] lowerCamelCase__ = fairseq_model.state_dict() lowerCamelCase__ = hf_model.feature_extractor # if encoder has different dim to decoder -> use proj_weight lowerCamelCase__ = None for name, value in fairseq_dict.items(): lowerCamelCase__ = False if "conv_layers" in name: load_conv_layer( __lowercase , __lowercase , __lowercase , __lowercase , hf_model.config.feat_extract_norm == """group""" , ) lowerCamelCase__ = True elif name.split(""".""" )[0] == "proj": lowerCamelCase__ = fairseq_model.proj lowerCamelCase__ = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: lowerCamelCase__ = True if "*" in mapped_key: lowerCamelCase__ = name.split(__lowercase )[0].split(""".""" )[-2] lowerCamelCase__ = mapped_key.replace("""*""" , __lowercase ) if "weight_g" in name: lowerCamelCase__ = """weight_g""" elif "weight_v" in name: lowerCamelCase__ = """weight_v""" elif "bias" in name: lowerCamelCase__ = """bias""" elif "weight" in name: lowerCamelCase__ = """weight""" else: lowerCamelCase__ = None set_recursively(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) continue if not is_used: unused_weights.append(__lowercase ) logger.warning(f"""Unused weights: {unused_weights}""" ) return proj_weight def _A ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ): """simple docstring""" lowerCamelCase__ = 
full_name.split("""conv_layers.""" )[-1] lowerCamelCase__ = name.split(""".""" ) lowerCamelCase__ = int(items[0] ) lowerCamelCase__ = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) lowerCamelCase__ = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) lowerCamelCase__ = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) lowerCamelCase__ = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) lowerCamelCase__ = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(__lowercase ) def _A ( __lowercase ): """simple docstring""" lowerCamelCase__ , lowerCamelCase__ = emb.weight.shape lowerCamelCase__ = nn.Linear(__lowercase , __lowercase , bias=__lowercase ) lowerCamelCase__ = emb.weight.data return lin_layer def _A ( __lowercase ): """simple docstring""" with open(__lowercase , """r""" , encoding="""utf-8""" ) as f: lowerCamelCase__ = f.readlines() lowerCamelCase__ = [line.split(""" """ )[0] for line in lines] lowerCamelCase__ = len(__lowercase ) lowerCamelCase__ = { """<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3, } vocab_dict.update(dict(zip(__lowercase , range(4 , num_words + 4 ) ) ) ) return vocab_dict @torch.no_grad() def _A ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ): """simple docstring""" lowerCamelCase__ = WavaVecaConfig.from_pretrained(__lowercase ) lowerCamelCase__ = SpeechaTextaConfig.from_pretrained( __lowercase , vocab_size=__lowercase , decoder_layers=__lowercase , do_stable_layer_norm=__lowercase ) lowerCamelCase__ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__lowercase , return_attention_mask=__lowercase , ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) lowerCamelCase__ = model[0].eval() # set weights for 
wav2vec2 encoder lowerCamelCase__ = WavaVecaModel(__lowercase ) lowerCamelCase__ = recursively_load_weights_wavaveca(model.encoder , __lowercase ) lowerCamelCase__ = SpeechaTextaForCausalLM(__lowercase ) lowerCamelCase__ , lowerCamelCase__ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__lowercase ) # set output linear layer unexpected_keys.remove("""embed_out""" ) lowerCamelCase__ = nn.Parameter(model.decoder.embed_out.detach() ) # layer norm is init to identity matrix so leaving it is fine logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" ) logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" ) lowerCamelCase__ = SpeechEncoderDecoderModel(encoder=__lowercase , decoder=__lowercase ) lowerCamelCase__ = False # add projection layer lowerCamelCase__ = nn.Parameter(projection_layer.weight ) lowerCamelCase__ = nn.Parameter(projection_layer.bias ) lowerCamelCase__ = create_vocab_dict(__lowercase ) with open(os.path.join(__lowercase , """vocab.json""" ) , """w""" ) as fp: json.dump(__lowercase , __lowercase ) lowerCamelCase__ = SpeechaTextaTokenizer(os.path.join(__lowercase , """vocab.json""" ) ) tokenizer.save_pretrained(__lowercase ) lowerCamelCase__ = hf_wavavec.config.to_dict() lowerCamelCase__ = tokenizer.pad_token_id lowerCamelCase__ = tokenizer.bos_token_id lowerCamelCase__ = tokenizer.eos_token_id lowerCamelCase__ = """speech_to_text_2""" lowerCamelCase__ = """wav2vec2""" lowerCamelCase__ = SpeechEncoderDecoderConfig.from_dict(__lowercase ) hf_wavavec.save_pretrained(__lowercase ) feature_extractor.save_pretrained(__lowercase ) if __name__ == "__main__": __magic_name__ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") 
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument( """--encoder_config_path""", default="""facebook/wav2vec2-large-lv60""", type=str, help="""Path to hf encoder wav2vec2 checkpoint config""", ) parser.add_argument( """--decoder_config_path""", default="""facebook/s2t-small-mustc-en-fr-st""", type=str, help="""Path to hf decoder s2t checkpoint config""", ) parser.add_argument("""--vocab_size""", default=1_02_24, type=int, help="""Vocab size of decoder""") parser.add_argument("""--num_decoder_layers""", default=7, type=int, help="""Number of decoder layers""") __magic_name__ = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
258
"""simple docstring""" from math import sqrt def _A ( __lowercase ): """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(sqrt(__lowercase ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def _A ( __lowercase = 1_0001 ): """simple docstring""" lowerCamelCase__ = 0 lowerCamelCase__ = 1 while count != nth and number < 3: number += 1 if is_prime(__lowercase ): count += 1 while count != nth: number += 2 if is_prime(__lowercase ): count += 1 return number if __name__ == "__main__": print(F'{solution() = }')
258
1
"""Project Euler problem 144: laser beam reflections inside an ellipse."""

from math import isclose, sqrt


def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    """Reflect the beam once off the ellipse 4x^2 + y^2 = 100.

    Given the current contact point ``(point_x, point_y)`` on the ellipse and
    the gradient of the incoming beam, return ``(next_x, next_y,
    outgoing_gradient)`` — the next contact point and the reflected gradient.
    """
    # gradient of the normal at (x, y): differentiating 4x^2 + y^2 = 100
    normal_gradient = point_y / 4 / point_x
    # rotate the incoming gradient by twice the normal angle (double-angle form)
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (1 + normal_gradient * normal_gradient)
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    #   y^2 + 4x^2 = 100
    #   y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)) / (2 * quadratic_term)
    x_plus = (-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_minus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    """Count reflections before the beam escapes through the top aperture.

    The beam enters at ``(first_x_coord, first_y_coord)`` heading toward
    (0.0, 10.1); it exits once it hits -0.01 <= x <= 0.01 on the upper half.
    """
    num_reflections = 0
    point_x = first_x_coord
    point_y = first_y_coord
    gradient = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1
    return num_reflections


# Backward-compatible alias: the mangled original exposed the entry point as
# ``_UpperCamelCase`` (the second definition shadowed the first, breaking
# the ``next_point`` call).
_UpperCamelCase = solution

if __name__ == "__main__":
    print(f"{solution() = }")
42
"""Gabor filter edge detection demo."""

import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey


def gabor_filter_kernel(ksize, sigma, theta, lambd, psi, gamma) -> np.ndarray:
    """Build a ``ksize`` x ``ksize`` Gabor filter kernel.

    :param ksize: kernel side length; bumped to the next odd number if even
    :param sigma: standard deviation of the Gaussian envelope
    :param theta: orientation of the filter in degrees
    :param lambd: wavelength of the sinusoidal factor
    :param psi:   phase offset of the sinusoid
    :param gamma: spatial aspect ratio (ellipticity of the envelope)

    NOTE(review): the mangled original named this function ``_UpperCamelCase``
    while the demo below called ``gabor_filter_kernel`` (a NameError), and used
    the nonexistent dtype ``np.floataa`` — both restored/fixed here.  The
    parameter order follows the usage in the body; the demo call only passes
    psi=0 and gamma=0, so the psi/gamma order cannot be confirmed from here.
    """
    # prepare kernel: the kernel size has to be odd so it has a true center
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # fill each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # rotate coordinates into the filter's frame
            _x = cos_theta * px + sin_theta * py
            _y = -sin_theta * px + cos_theta * py

            # Gaussian envelope times sinusoidal carrier
            gabor[y, x] = np.exp(-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filteraD(gray, CV_8UC3, kernel)
    out = out / out.max() * 255
    out = out.astype(np.uint8)  # was np.uinta in the mangled original

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
42
1
from ..utils import DummyObject, requires_backends class UpperCamelCase( metaclass=_a ): snake_case_ : int = ["""flax"""] def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Dict ) -> int: '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Any , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Tuple ) -> Any: '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Dict ) -> Dict: '''simple docstring''' requires_backends(cls , ["flax"] ) class UpperCamelCase( metaclass=_a ): snake_case_ : Union[str, Any] = ["""flax"""] def __init__( self : Any , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Optional[int] ) -> int: '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Dict ) -> Any: '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : int , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[int]: '''simple docstring''' requires_backends(cls , ["flax"] ) class UpperCamelCase( metaclass=_a ): snake_case_ : Union[str, Any] = ["""flax"""] def __init__( self : List[str] , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Optional[int]: '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : List[str] , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : List[Any] ) -> Dict: '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : int , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Dict ) -> Optional[int]: '''simple docstring''' requires_backends(cls , ["flax"] ) class 
UpperCamelCase( metaclass=_a ): snake_case_ : Tuple = ["""flax"""] def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Any ) -> Union[str, Any]: '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Dict , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : int ) -> List[Any]: '''simple docstring''' requires_backends(cls , ["flax"] ) class UpperCamelCase( metaclass=_a ): snake_case_ : List[str] = ["""flax"""] def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : str ) -> str: '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Optional[int] ) -> int: '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : int , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Tuple ) -> Optional[int]: '''simple docstring''' requires_backends(cls , ["flax"] ) class UpperCamelCase( metaclass=_a ): snake_case_ : int = ["""flax"""] def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Optional[int] ) -> Dict: '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Union[str, Any] ) -> int: '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : int , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Optional[int] ) -> Union[str, Any]: '''simple docstring''' requires_backends(cls , ["flax"] ) class UpperCamelCase( 
metaclass=_a ): snake_case_ : Union[str, Any] = ["""flax"""] def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Any ) -> str: '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Tuple , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[Any]: '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Union[str, Any] ) -> int: '''simple docstring''' requires_backends(cls , ["flax"] ) class UpperCamelCase( metaclass=_a ): snake_case_ : Optional[Any] = ["""flax"""] def __init__( self : str , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : str ) -> Tuple: '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : List[str] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[str]: '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Any , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Optional[int] ) -> Tuple: '''simple docstring''' requires_backends(cls , ["flax"] ) class UpperCamelCase( metaclass=_a ): snake_case_ : List[Any] = ["""flax"""] def __init__( self : Tuple , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : int ) -> Tuple: '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Tuple , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : Optional[Any] ) -> str: '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Tuple ) -> Optional[int]: '''simple docstring''' requires_backends(cls , ["flax"] ) class UpperCamelCase( metaclass=_a ): snake_case_ : List[str] = 
["""flax"""] def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : int ) -> Dict: '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Dict ) -> List[Any]: '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Dict , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[int]: '''simple docstring''' requires_backends(cls , ["flax"] ) class UpperCamelCase( metaclass=_a ): snake_case_ : Optional[int] = ["""flax"""] def __init__( self : Dict , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Dict ) -> str: '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[Any]: '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Any ) -> List[Any]: '''simple docstring''' requires_backends(cls , ["flax"] ) class UpperCamelCase( metaclass=_a ): snake_case_ : Any = ["""flax"""] def __init__( self : str , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : str ) -> List[Any]: '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Dict , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Tuple ) -> List[str]: '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : List[str] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : int ) -> Optional[Any]: '''simple docstring''' requires_backends(cls , ["flax"] ) class UpperCamelCase( metaclass=_a ): snake_case_ : str = ["""flax"""] def __init__( self : 
Optional[int] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ) -> int: '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Any , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : str ) -> Any: '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Tuple ) -> Tuple: '''simple docstring''' requires_backends(cls , ["flax"] )
717
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging


logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}


class BlenderbotSmallConfig(PretrainedConfig):
    """
    Configuration for a BlenderbotSmall encoder-decoder model.

    Holds the hyper-parameters (vocabulary size, hidden size, number of
    encoder/decoder layers, dropout rates, special-token ids, ...) used to
    instantiate the model.
    """

    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map the generic config attribute names onto the BART-style names used here.
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        # NOTE(review): the previous version assigned every hyper-parameter to the
        # same throwaway local instead of `self.<attr>`, so nothing was ever stored
        # on the config instance; the `self.` assignments are restored here.
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )


class BlenderbotSmallOnnxConfig(OnnxSeqaSeqConfigWithPast):
    """ONNX export configuration for BlenderbotSmall (default, seq2seq-lm and causal-lm tasks)."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Return the dynamic-axes description of the model inputs for the current task."""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                # With a cache the decoder only consumes the last generated token.
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        """Return the dynamic-axes description of the model outputs for the current task."""
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            # causal-lm reuses the single-stack behaviour of OnnxConfigWithPast.
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seqaseq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build dummy encoder+decoder inputs (and, if enabled, zeroed past_key_values)."""
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build dummy decoder-only inputs (and, if enabled, zeroed past_key_values)."""
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Tokenize a batch of dummy sentences; -1 axes fall back to fixed ONNX defaults."""
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Dispatch dummy-input generation to the helper matching the current task."""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        """Flatten one past_key_values tensor, using the seq2seq or single-stack naming scheme."""
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeqaSeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
473
0
"""Lazily-importing package init for the EfficientNet model."""
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Structure handed to _LazyModule: submodule name -> public names it exports.
# NOTE(review): previously the structure, the optional entries and the lazy
# module itself were all assigned to throwaway `lowercase` names while the code
# below referenced `_import_structure`, which was never defined; restored.
_import_structure = {
    "configuration_efficientnet": [
        "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientNetConfig",
        "EfficientNetOnnxConfig",
    ]
}

# The image processor needs vision deps (PIL); register it only when available.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]

# The modeling code needs PyTorch; register it only when available.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_efficientnet"] = [
        "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientNetForImageClassification",
        "EfficientNetModel",
        "EfficientNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_efficientnet import (
        EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EfficientNetConfig,
        EfficientNetOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientnet import EfficientNetImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientnet import (
            EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientNetForImageClassification,
            EfficientNetModel,
            EfficientNetPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy submodules
    # are only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
649
"""simple docstring""" import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class lowerCamelCase__ ( _a , unittest.TestCase ): a : Union[str, Any] = BarthezTokenizer a : Any = BarthezTokenizerFast a : Tuple = True a : List[Any] = True def SCREAMING_SNAKE_CASE_ ( self : List[str] ): '''simple docstring''' super().setUp() __lowercase = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname , legacy_format=A_ ) __lowercase = tokenizer def SCREAMING_SNAKE_CASE_ ( self : Tuple ): '''simple docstring''' __lowercase = """<pad>""" __lowercase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) , A_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) , A_ ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): '''simple docstring''' __lowercase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<s>""" ) self.assertEqual(vocab_keys[1] , """<pad>""" ) self.assertEqual(vocab_keys[-1] , """<mask>""" ) self.assertEqual(len(A_ ) , 1_0_1_1_2_2 ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_1_2_2 ) @require_torch def SCREAMING_SNAKE_CASE_ ( self : str ): '''simple docstring''' __lowercase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] __lowercase = [0, 5_7, 3_0_1_8, 7_0_3_0_7, 9_1, 2] __lowercase = self.tokenizer( A_ , max_length=len(A_ ) , padding=A_ , truncation=A_ , return_tensors="""pt""" ) self.assertIsInstance(A_ , A_ ) self.assertEqual((2, 6) , batch.input_ids.shape ) 
self.assertEqual((2, 6) , batch.attention_mask.shape ) __lowercase = batch.input_ids.tolist()[0] self.assertListEqual(A_ , A_ ) def SCREAMING_SNAKE_CASE_ ( self : int ): '''simple docstring''' if not self.test_rust_tokenizer: return __lowercase = self.get_tokenizer() __lowercase = self.get_rust_tokenizer() __lowercase = """I was born in 92000, and this is falsé.""" __lowercase = tokenizer.tokenize(A_ ) __lowercase = rust_tokenizer.tokenize(A_ ) self.assertListEqual(A_ , A_ ) __lowercase = tokenizer.encode(A_ , add_special_tokens=A_ ) __lowercase = rust_tokenizer.encode(A_ , add_special_tokens=A_ ) self.assertListEqual(A_ , A_ ) __lowercase = self.get_rust_tokenizer() __lowercase = tokenizer.encode(A_ ) __lowercase = rust_tokenizer.encode(A_ ) self.assertListEqual(A_ , A_ ) @slow def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): '''simple docstring''' __lowercase = {"""input_ids""": [[0, 4_9_0, 1_4_3_2_8, 4_5_0_7, 3_5_4, 4_7, 4_3_6_6_9, 9_5, 2_5, 7_8_1_1_7, 2_0_2_1_5, 1_9_7_7_9, 1_9_0, 2_2, 4_0_0, 4, 3_5_3_4_3, 8_0_3_1_0, 6_0_3, 8_6, 2_4_9_3_7, 1_0_5, 3_3_4_3_8, 9_4_7_6_2, 1_9_6, 3_9_6_4_2, 7, 1_5, 1_5_9_3_3, 1_7_3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0_5_3_4, 8_7, 2_5, 6_6, 3_3_5_8, 1_9_6, 5_5_2_8_9, 8, 8_2_9_6_1, 8_1, 2_2_0_4, 7_5_2_0_3, 7, 1_5, 7_6_3, 1_2_9_5_6, 2_1_6, 1_7_8, 1_4_3_2_8, 9_5_9_5, 1_3_7_7, 6_9_6_9_3, 7, 4_4_8, 7_1_0_2_1, 1_9_6, 1_8_1_0_6, 1_4_3_7, 1_3_9_7_4, 1_0_8, 9_0_8_3, 4, 4_9_3_1_5, 7, 3_9, 8_6, 1_3_2_6, 2_7_9_3, 4_6_3_3_3, 4, 4_4_8, 1_9_6, 7_4_5_8_8, 7, 4_9_3_1_5, 7, 3_9, 2_1, 8_2_2, 3_8_4_7_0, 7_4, 2_1, 6_6_7_2_3, 6_2_4_8_0, 8, 2_2_0_5_0, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. __lowercase = [ """Le transformeur est un modèle d'apprentissage profond introduit en 2017, """ """utilisé principalement dans le domaine du traitement automatique des langues (TAL).""", """À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """ """pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """ """telles que la traduction et la synthèse de texte.""", ] self.tokenizer_integration_test_util( expected_encoding=A_ , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=A_ , )
616
0
"""Tests for the OpenAI GPT BPE tokenizer (slow and fast)."""
import json
import os
import unittest

from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Exercises a tiny hand-built BPE vocabulary against both tokenizer implementations."""

    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        """Write a minimal vocab and merges file into the tmp dir."""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        """Tokenization and token->id conversion follow the hand-built merges."""
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_padding(self, max_length=15):
        """Padding to max_length must raise: this tokenizer defines no pad token."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                # NOTE(review): previously assigned to a throwaway local while the
                # assertions below referenced `tokenizer_r`; restored.
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_different_model_input_name(self):
        # Not applicable: the tokenizer has no pad token, so the common test is skipped.
        pass


@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    """Run the same tests with ftfy + spacy available, which changes pre-tokenization."""

    pass
710
"""Convert an original RWKV checkpoint from the Hub into the HF `transformers` format."""
import argparse
import gc
import json
import os
import re

import torch
from huggingface_hub import hf_hub_download

from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint


# Number of hidden layers per published RWKV model size.
NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

# Hidden size per published RWKV model size.
HIDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}


def convert_state_dict(state_dict):
    """Rename the keys of an original RWKV state dict in place to the HF naming scheme.

    Returns the same dict with renamed keys.
    """
    # NOTE(review): previously both functions in this script were named `A_` (the
    # second shadowing the first) and parameters shared the duplicate name `A`,
    # which is a SyntaxError; real names restored from the call sites.
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key and reshape
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value and reshape
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_key and reshape
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict


def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    """Download `checkpoint_file` from `repo_id`, convert it and save it under `output_dir`.

    Args:
        repo_id: Hub repo holding the original checkpoint.
        checkpoint_file: name of the checkpoint file inside the repo.
        output_dir: where the converted config/tokenizer/weights are written.
        size: model size key (inferred from the file name when None).
        tokenizer_file: optional tokenizer file (defaults to GPT-NeoX-20B tokenizer).
        push_to_hub: also push the converted model under `model_name`.
        model_name: Hub name to push to (required when push_to_hub is True).

    Raises:
        ValueError: when the size cannot be inferred/validated, or push_to_hub is
            requested without a model_name.
    """
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
    print(
        "Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
    )
    parser.add_argument(
        "--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
    )
    parser.add_argument(
        "--tokenizer_file",
        default=None,
        type=str,
        help="Path to the tokenizer file to use (if not provided, only the model is converted).",
    )
    parser.add_argument(
        "--size",
        default=None,
        type=str,
        help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Push to the Hub the converted model.",
    )
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="Name of the pushed model on the Hub, including the username / organization.",
    )

    args = parser.parse_args()
    convert_rmkv_checkpoint_to_hf_format(
        args.repo_id,
        args.checkpoint_file,
        args.output_dir,
        size=args.size,
        tokenizer_file=args.tokenizer_file,
        push_to_hub=args.push_to_hub,
        model_name=args.model_name,
    )
486
0