code
stringlengths
81
54k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
"""simple docstring""" def lowercase__(A ) ->bool: """simple docstring""" lowercase__ : Dict= n ** (1 / 3) return (val * val * val) == n if __name__ == "__main__": print(perfect_cube(27)) print(perfect_cube(4))
85
"""simple docstring""" import argparse import os from . import ( ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BART_PRETRAINED_MODEL_ARCHIVE_LIST, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, BartConfig, BertConfig, CamembertConfig, CTRLConfig, DistilBertConfig, DPRConfig, ElectraConfig, FlaubertConfig, GPTaConfig, LayoutLMConfig, LxmertConfig, OpenAIGPTConfig, RobertaConfig, TaConfig, TFAlbertForPreTraining, TFBartForConditionalGeneration, TFBartForSequenceClassification, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFCamembertForMaskedLM, TFCTRLLMHeadModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, TFElectraForPreTraining, TFFlaubertWithLMHeadModel, TFGPTaLMHeadModel, TFLayoutLMForMaskedLM, TFLxmertForPreTraining, TFLxmertVisualFeatureEncoder, TFOpenAIGPTLMHeadModel, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForSequenceClassification, TFTaForConditionalGeneration, TFTransfoXLLMHeadModel, TFWavaVecaModel, TFXLMRobertaForMaskedLM, TFXLMWithLMHeadModel, TFXLNetLMHeadModel, TransfoXLConfig, WavaVecaConfig, WavaVecaModel, XLMConfig, XLMRobertaConfig, XLNetConfig, is_torch_available, 
load_pytorch_checkpoint_in_tfa_model, ) from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging if is_torch_available(): import numpy as np import torch from . import ( AlbertForPreTraining, BartForConditionalGeneration, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, CamembertForMaskedLM, CTRLLMHeadModel, DistilBertForMaskedLM, DistilBertForQuestionAnswering, DPRContextEncoder, DPRQuestionEncoder, DPRReader, ElectraForPreTraining, FlaubertWithLMHeadModel, GPTaLMHeadModel, LayoutLMForMaskedLM, LxmertForPreTraining, LxmertVisualFeatureEncoder, OpenAIGPTLMHeadModel, RobertaForMaskedLM, RobertaForSequenceClassification, TaForConditionalGeneration, TransfoXLLMHeadModel, XLMRobertaForMaskedLM, XLMWithLMHeadModel, XLNetLMHeadModel, ) logging.set_verbosity_info() a : Optional[Any] = { """bart""": ( BartConfig, TFBartForConditionalGeneration, TFBartForSequenceClassification, BartForConditionalGeneration, BART_PRETRAINED_MODEL_ARCHIVE_LIST, ), """bert""": ( BertConfig, TFBertForPreTraining, BertForPreTraining, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-large-uncased-whole-word-masking-finetuned-squad""": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-large-cased-whole-word-masking-finetuned-squad""": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-base-cased-finetuned-mrpc""": ( BertConfig, TFBertForSequenceClassification, BertForSequenceClassification, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """dpr""": ( DPRConfig, TFDPRQuestionEncoder, TFDPRContextEncoder, TFDPRReader, DPRQuestionEncoder, DPRContextEncoder, DPRReader, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ), """gpt2""": ( GPTaConfig, TFGPTaLMHeadModel, GPTaLMHeadModel, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlnet""": ( XLNetConfig, 
TFXLNetLMHeadModel, XLNetLMHeadModel, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlm""": ( XLMConfig, TFXLMWithLMHeadModel, XLMWithLMHeadModel, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlm-roberta""": ( XLMRobertaConfig, TFXLMRobertaForMaskedLM, XLMRobertaForMaskedLM, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """transfo-xl""": ( TransfoXLConfig, TFTransfoXLLMHeadModel, TransfoXLLMHeadModel, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """openai-gpt""": ( OpenAIGPTConfig, TFOpenAIGPTLMHeadModel, OpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """roberta""": ( RobertaConfig, TFRobertaForCausalLM, TFRobertaForMaskedLM, RobertaForMaskedLM, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """layoutlm""": ( LayoutLMConfig, TFLayoutLMForMaskedLM, LayoutLMForMaskedLM, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, ), """roberta-large-mnli""": ( RobertaConfig, TFRobertaForSequenceClassification, RobertaForSequenceClassification, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """camembert""": ( CamembertConfig, TFCamembertForMaskedLM, CamembertForMaskedLM, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """flaubert""": ( FlaubertConfig, TFFlaubertWithLMHeadModel, FlaubertWithLMHeadModel, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """distilbert""": ( DistilBertConfig, TFDistilBertForMaskedLM, DistilBertForMaskedLM, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """distilbert-base-distilled-squad""": ( DistilBertConfig, TFDistilBertForQuestionAnswering, DistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """lxmert""": ( LxmertConfig, TFLxmertForPreTraining, LxmertForPreTraining, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """lxmert-visual-feature-encoder""": ( LxmertConfig, TFLxmertVisualFeatureEncoder, LxmertVisualFeatureEncoder, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """ctrl""": ( CTRLConfig, TFCTRLLMHeadModel, CTRLLMHeadModel, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """albert""": ( AlbertConfig, TFAlbertForPreTraining, AlbertForPreTraining, 
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """t5""": ( TaConfig, TFTaForConditionalGeneration, TaForConditionalGeneration, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """electra""": ( ElectraConfig, TFElectraForPreTraining, ElectraForPreTraining, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """wav2vec2""": ( WavaVecaConfig, TFWavaVecaModel, WavaVecaModel, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), } def lowercase__(A , A , A , A , A=False , A=True ) ->Union[str, Any]: """simple docstring""" if model_type not in MODEL_CLASSES: raise ValueError(f'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' ) lowercase__, lowercase__, lowercase__, lowercase__ : List[Any]= MODEL_CLASSES[model_type] # Initialise TF model if config_file in aws_config_map: lowercase__ : List[str]= cached_file(A , A , force_download=not use_cached_models ) lowercase__ : List[Any]= config_class.from_json_file(A ) lowercase__ : Any= True lowercase__ : List[str]= True print(f'''Building TensorFlow model from configuration: {config}''' ) lowercase__ : Optional[int]= model_class(A ) # Load weights from tf checkpoint if pytorch_checkpoint_path in aws_config_map.keys(): lowercase__ : List[str]= cached_file( A , A , force_download=not use_cached_models ) # Load PyTorch checkpoint in tf2 model: lowercase__ : Union[str, Any]= load_pytorch_checkpoint_in_tfa_model(A , A ) if compare_with_pt_model: lowercase__ : Any= tf_model(tf_model.dummy_inputs , training=A ) # build the network lowercase__ : Optional[Any]= torch.load(A , map_location="cpu" ) lowercase__ : Union[str, Any]= pt_model_class.from_pretrained( pretrained_model_name_or_path=A , config=A , state_dict=A ) with torch.no_grad(): lowercase__ : str= pt_model(**pt_model.dummy_inputs ) lowercase__ : Tuple= pto[0].numpy() lowercase__ : List[Any]= tfo[0].numpy() lowercase__ : Any= np.amax(np.abs(np_pt - np_tf ) ) print(f'''Max absolute difference between models outputs {diff}''' ) assert diff <= 2e-2, f'''Error, model absolute difference is 
>2e-2: {diff}''' # Save pytorch-model print(f'''Save TensorFlow model to {tf_dump_path}''' ) tf_model.save_weights(A , save_format="h5" ) def lowercase__(A , A , A=None , A=None , A=False , A=False , A=False , A=False , ) ->List[Any]: """simple docstring""" if args_model_type is None: lowercase__ : Tuple= list(MODEL_CLASSES.keys() ) else: lowercase__ : Optional[int]= [args_model_type] for j, model_type in enumerate(A , start=1 ): print("=" * 100 ) print(f''' Converting model type {j}/{len(A )}: {model_type}''' ) print("=" * 100 ) if model_type not in MODEL_CLASSES: raise ValueError(f'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' ) lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ : Optional[int]= MODEL_CLASSES[model_type] if model_shortcut_names_or_path is None: lowercase__ : int= list(aws_model_maps.keys() ) if config_shortcut_names_or_path is None: lowercase__ : Any= model_shortcut_names_or_path for i, (model_shortcut_name, config_shortcut_name) in enumerate( zip(A , A ) , start=1 ): print("-" * 100 ) if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name: if not only_convert_finetuned_models: print(f''' Skipping finetuned checkpoint {model_shortcut_name}''' ) continue lowercase__ : Any= model_shortcut_name elif only_convert_finetuned_models: print(f''' Skipping not finetuned checkpoint {model_shortcut_name}''' ) continue print( f''' Converting checkpoint {i}/{len(A )}: {model_shortcut_name} - model_type {model_type}''' ) print("-" * 100 ) if config_shortcut_name in aws_config_map: lowercase__ : List[str]= cached_file(A , A , force_download=not use_cached_models ) else: lowercase__ : Union[str, Any]= config_shortcut_name if model_shortcut_name in aws_model_maps: lowercase__ : str= cached_file(A , A , force_download=not use_cached_models ) else: lowercase__ : Any= model_shortcut_name if os.path.isfile(A ): lowercase__ : Dict= "converted_model" 
convert_pt_checkpoint_to_tf( model_type=A , pytorch_checkpoint_path=A , config_file=A , tf_dump_path=os.path.join(A , model_shortcut_name + "-tf_model.h5" ) , compare_with_pt_model=A , ) if remove_cached_files: os.remove(A ) os.remove(A ) if __name__ == "__main__": a : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file.""" ) parser.add_argument( """--model_type""", default=None, type=str, help=( F"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """ """convert all the models from AWS.""" ), ) parser.add_argument( """--pytorch_checkpoint_path""", default=None, type=str, help=( """Path to the PyTorch checkpoint path or shortcut name to download from AWS. """ """If not given, will download and convert all the checkpoints from AWS.""" ), ) parser.add_argument( """--config_file""", default=None, type=str, help=( """The config json file corresponding to the pre-trained model. \n""" """This specifies the model architecture. 
If not given and """ """--pytorch_checkpoint_path is not given or is a shortcut name """ """use the configuration associated to the shortcut name on the AWS""" ), ) parser.add_argument( """--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions.""" ) parser.add_argument( """--use_cached_models""", action="""store_true""", help="""Use cached models if possible instead of updating to latest checkpoint versions.""", ) parser.add_argument( """--remove_cached_files""", action="""store_true""", help="""Remove pytorch models after conversion (save memory when converting in batches).""", ) parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""") a : List[str] = parser.parse_args() # if args.pytorch_checkpoint_path is not None: # convert_pt_checkpoint_to_tf(args.model_type.lower(), # args.pytorch_checkpoint_path, # args.config_file if args.config_file is not None else args.pytorch_checkpoint_path, # args.tf_dump_path, # compare_with_pt_model=args.compare_with_pt_model, # use_cached_models=args.use_cached_models) # else: convert_all_pt_checkpoints_to_tf( args.model_type.lower() if args.model_type is not None else None, args.tf_dump_path, model_shortcut_names_or_path=[args.pytorch_checkpoint_path] if args.pytorch_checkpoint_path is not None else None, config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None, compare_with_pt_model=args.compare_with_pt_model, use_cached_models=args.use_cached_models, remove_cached_files=args.remove_cached_files, only_convert_finetuned_models=args.only_convert_finetuned_models, )
85
1
"""simple docstring""" import argparse import os from pathlib import Path import torch from bark.generation import _load_model as _bark_load_model from huggingface_hub import hf_hub_download from transformers import EncodecConfig, EncodecModel, set_seed from transformers.models.bark.configuration_bark import ( BarkCoarseConfig, BarkConfig, BarkFineConfig, BarkSemanticConfig, ) from transformers.models.bark.generation_configuration_bark import ( BarkCoarseGenerationConfig, BarkFineGenerationConfig, BarkGenerationConfig, BarkSemanticGenerationConfig, ) from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel from transformers.utils import logging logging.set_verbosity_info() a : Any = logging.get_logger(__name__) set_seed(770) a : List[str] = { """c_attn""": """att_proj""", """c_proj""": """out_proj""", """c_fc""": """in_proj""", """transformer.""": """""", """h.""": """layers.""", """ln_1""": """layernorm_1""", """ln_2""": """layernorm_2""", """ln_f""": """layernorm_final""", """wpe""": """position_embeds_layer""", """wte""": """input_embeds_layer""", } a : Any = { """text_small""": { """repo_id""": """suno/bark""", """file_name""": """text.pt""", }, """coarse_small""": { """repo_id""": """suno/bark""", """file_name""": """coarse.pt""", }, """fine_small""": { """repo_id""": """suno/bark""", """file_name""": """fine.pt""", }, """text""": { """repo_id""": """suno/bark""", """file_name""": """text_2.pt""", }, """coarse""": { """repo_id""": """suno/bark""", """file_name""": """coarse_2.pt""", }, """fine""": { """repo_id""": """suno/bark""", """file_name""": """fine_2.pt""", }, } a : str = os.path.dirname(os.path.abspath(__file__)) a : Optional[Any] = os.path.join(os.path.expanduser("""~"""), """.cache""") a : int = os.path.join(os.getenv("""XDG_CACHE_HOME""", default_cache_dir), """suno""", """bark_v0""") def lowercase__(A , A=False ) ->Tuple: """simple docstring""" lowercase__ : List[Any]= model_type if use_small: 
key += "_small" return os.path.join(A , REMOTE_MODEL_PATHS[key]["file_name"] ) def lowercase__(A , A ) ->str: """simple docstring""" os.makedirs(A , exist_ok=A ) hf_hub_download(repo_id=A , filename=A , local_dir=A ) def lowercase__(A , A , A=False , A="text" ) ->int: """simple docstring""" if model_type == "text": lowercase__ : Dict= BarkSemanticModel lowercase__ : Optional[int]= BarkSemanticConfig lowercase__ : List[str]= BarkSemanticGenerationConfig elif model_type == "coarse": lowercase__ : Optional[int]= BarkCoarseModel lowercase__ : Tuple= BarkCoarseConfig lowercase__ : Any= BarkCoarseGenerationConfig elif model_type == "fine": lowercase__ : Tuple= BarkFineModel lowercase__ : List[Any]= BarkFineConfig lowercase__ : str= BarkFineGenerationConfig else: raise NotImplementedError() lowercase__ : Dict= f'''{model_type}_small''' if use_small else model_type lowercase__ : Dict= REMOTE_MODEL_PATHS[model_key] if not os.path.exists(A ): logger.info(f'''{model_type} model not found, downloading into `{CACHE_DIR}`.''' ) _download(model_info["repo_id"] , model_info["file_name"] ) lowercase__ : Optional[Any]= torch.load(A , map_location=A ) # this is a hack lowercase__ : Union[str, Any]= checkpoint["model_args"] if "input_vocab_size" not in model_args: lowercase__ : str= model_args["vocab_size"] lowercase__ : List[Any]= model_args["vocab_size"] del model_args["vocab_size"] # convert Bark model arguments to HF Bark model arguments lowercase__ : Any= model_args.pop("n_head" ) lowercase__ : Tuple= model_args.pop("n_embd" ) lowercase__ : int= model_args.pop("n_layer" ) lowercase__ : Optional[Any]= ConfigClass(**checkpoint["model_args"] ) lowercase__ : str= ModelClass(config=A ) lowercase__ : Union[str, Any]= GenerationConfigClass() lowercase__ : Any= model_generation_config lowercase__ : Optional[int]= checkpoint["model"] # fixup checkpoint lowercase__ : Optional[Any]= "_orig_mod." 
for k, v in list(state_dict.items() ): if k.startswith(A ): # replace part of the key with corresponding layer name in HF implementation lowercase__ : List[Any]= k[len(A ) :] for old_layer_name in new_layer_name_dict: lowercase__ : Tuple= new_k.replace(A , new_layer_name_dict[old_layer_name] ) lowercase__ : List[Any]= state_dict.pop(A ) lowercase__ : Tuple= set(state_dict.keys() ) - set(model.state_dict().keys() ) lowercase__ : Optional[Any]= {k for k in extra_keys if not k.endswith(".attn.bias" )} lowercase__ : str= set(model.state_dict().keys() ) - set(state_dict.keys() ) lowercase__ : Optional[Any]= {k for k in missing_keys if not k.endswith(".attn.bias" )} if len(A ) != 0: raise ValueError(f'''extra keys found: {extra_keys}''' ) if len(A ) != 0: raise ValueError(f'''missing keys: {missing_keys}''' ) model.load_state_dict(A , strict=A ) lowercase__ : Optional[Any]= model.num_parameters(exclude_embeddings=A ) lowercase__ : Union[str, Any]= checkpoint["best_val_loss"].item() logger.info(f'''model loaded: {round(n_params/1e6 , 1 )}M params, {round(A , 3 )} loss''' ) model.eval() model.to(A ) del checkpoint, state_dict return model def lowercase__(A , A=False , A="text" ) ->Optional[int]: """simple docstring""" if model_type not in ("text", "coarse", "fine"): raise NotImplementedError() lowercase__ : Optional[int]= "cpu" # do conversion on cpu lowercase__ : List[Any]= _get_ckpt_path(A , use_small=A ) lowercase__ : Optional[int]= _load_model(A , A , model_type=A , use_small=A ) # load bark initial model lowercase__ : Tuple= _bark_load_model(A , "cpu" , model_type=A , use_small=A ) if model_type == "text": lowercase__ : Optional[Any]= bark_model["model"] if model.num_parameters(exclude_embeddings=A ) != bark_model.get_num_params(): raise ValueError("initial and new models don't have the same number of parameters" ) # check if same output as the bark model lowercase__ : List[Any]= 5 lowercase__ : int= 10 if model_type in ["text", "coarse"]: lowercase__ : Union[str, 
Any]= torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int ) lowercase__ : str= bark_model(A )[0] lowercase__ : List[Any]= model(A ) # take last logits lowercase__ : List[str]= output_new_model_total.logits[:, [-1], :] else: lowercase__ : Dict= 3 lowercase__ : Optional[int]= 8 lowercase__ : Tuple= torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int ) lowercase__ : Optional[Any]= model(A , A ) lowercase__ : List[str]= bark_model(A , A ) lowercase__ : Union[str, Any]= output_new_model_total.logits # output difference should come from the difference of self-attention implementation design if output_new_model.shape != output_old_model.shape: raise ValueError("initial and new outputs don't have the same shape" ) if (output_new_model - output_old_model).abs().max().item() > 1e-3: raise ValueError("initial and new outputs are not equal" ) Path(A ).mkdir(exist_ok=A ) model.save_pretrained(A ) def lowercase__(A , A , A , A , A , A , ) ->List[str]: """simple docstring""" lowercase__ : Any= os.path.join(A , A ) lowercase__ : int= BarkSemanticConfig.from_pretrained(os.path.join(A , "config.json" ) ) lowercase__ : List[str]= BarkCoarseConfig.from_pretrained(os.path.join(A , "config.json" ) ) lowercase__ : str= BarkFineConfig.from_pretrained(os.path.join(A , "config.json" ) ) lowercase__ : Optional[int]= EncodecConfig.from_pretrained("facebook/encodec_24khz" ) lowercase__ : str= BarkSemanticModel.from_pretrained(A ) lowercase__ : int= BarkCoarseModel.from_pretrained(A ) lowercase__ : Dict= BarkFineModel.from_pretrained(A ) lowercase__ : Optional[int]= EncodecModel.from_pretrained("facebook/encodec_24khz" ) lowercase__ : Union[str, Any]= BarkConfig.from_sub_model_configs( A , A , A , A ) lowercase__ : str= BarkGenerationConfig.from_sub_model_configs( semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config ) lowercase__ : str= BarkModel(A ) lowercase__ : Any= semantic lowercase__ : Union[str, 
Any]= coarseAcoustic lowercase__ : List[str]= fineAcoustic lowercase__ : List[Any]= codec lowercase__ : List[str]= bark_generation_config Path(A ).mkdir(exist_ok=A ) bark.save_pretrained(A , repo_id=A , push_to_hub=A ) if __name__ == "__main__": a : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument("""model_type""", type=str, help="""text, coarse or fine.""") parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--is_small""", action="""store_true""", help="""convert the small version instead of the large.""") a : Union[str, Any] = parser.parse_args() load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
85
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import _LazyModule a : List[str] = {"""processing_wav2vec2_with_lm""": ["""Wav2Vec2ProcessorWithLM"""]} if TYPE_CHECKING: from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM else: import sys a : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
85
1
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFXLMRobertaModel @require_tf @require_sentencepiece @require_tokenizers class __UpperCAmelCase( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[Any]= TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base" ) lowercase__ : str= { "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]] , dtype=tf.intaa ), # "My dog is cute" "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ), } lowercase__ : Union[str, Any]= model(snake_case__ )["last_hidden_state"] lowercase__ : Optional[int]= tf.TensorShape((1, 6, 768) ) self.assertEqual(output.shape , snake_case__ ) # compare the actual values for a slice. lowercase__ : Tuple= tf.convert_to_tensor( [ [ [0.0_68_17_62, 0.10_89_44_51, 0.06_77_25_04], [-0.06_42_36_68, 0.02_36_66_15, 0.04_32_93_44], [-0.06_05_72_95, 0.09_97_41_35, -0.00_07_05_84], ] ] , dtype=tf.floataa , ) self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
85
"""simple docstring""" def lowercase__(A ) ->list: """simple docstring""" if n_term == "": return [] lowercase__ : list= [] for temp in range(int(A ) ): series.append(f'''1/{temp + 1}''' if series else "1" ) return series if __name__ == "__main__": a : Dict = input("""Enter the last number (nth term) of the Harmonic Series""") print("""Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n""") print(harmonic_series(nth_term))
85
1
"""simple docstring""" from __future__ import annotations from typing import Dict from ...configuration_utils import PretrainedConfig a : Union[str, Any] = { """susnato/ernie-m-base_pytorch""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json""", """susnato/ernie-m-large_pytorch""": """https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json""", } class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = "ernie_m" __lowerCamelCase = {"dropout": "classifier_dropout", "num_classes": "num_labels"} def __init__( self , snake_case__ = 250002 , snake_case__ = 768 , snake_case__ = 12 , snake_case__ = 12 , snake_case__ = 3072 , snake_case__ = "gelu" , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 514 , snake_case__ = 0.02 , snake_case__ = 1 , snake_case__ = 1e-05 , snake_case__=None , snake_case__=False , snake_case__=0.0 , **snake_case__ , ): '''simple docstring''' super().__init__(pad_token_id=snake_case__ , **snake_case__ ) lowercase__ : Any= vocab_size lowercase__ : List[Any]= hidden_size lowercase__ : Union[str, Any]= num_hidden_layers lowercase__ : Dict= num_attention_heads lowercase__ : List[str]= intermediate_size lowercase__ : List[Any]= hidden_act lowercase__ : Optional[int]= hidden_dropout_prob lowercase__ : Dict= attention_probs_dropout_prob lowercase__ : str= max_position_embeddings lowercase__ : Any= initializer_range lowercase__ : Tuple= layer_norm_eps lowercase__ : Tuple= classifier_dropout lowercase__ : int= is_decoder lowercase__ : List[str]= act_dropout
85
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a : int = logging.get_logger(__name__) a : str = { """google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""", """google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""", """google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""", # See all BigBird models at https://huggingface.co/models?filter=big_bird } class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = "big_bird" def __init__( self , snake_case__=50358 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu_new" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=4096 , snake_case__=2 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=True , snake_case__=0 , snake_case__=1 , snake_case__=2 , snake_case__=66 , snake_case__="block_sparse" , snake_case__=True , snake_case__=False , snake_case__=64 , snake_case__=3 , snake_case__=None , **snake_case__ , ): '''simple docstring''' super().__init__( pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , sep_token_id=snake_case__ , **snake_case__ , ) lowercase__ : Dict= vocab_size lowercase__ : Optional[int]= max_position_embeddings lowercase__ : List[Any]= hidden_size lowercase__ : List[str]= num_hidden_layers lowercase__ : List[str]= num_attention_heads lowercase__ : Optional[int]= intermediate_size lowercase__ : Optional[int]= hidden_act lowercase__ : Tuple= hidden_dropout_prob lowercase__ : int= attention_probs_dropout_prob lowercase__ : int= initializer_range lowercase__ : List[Any]= type_vocab_size lowercase__ : Union[str, Any]= layer_norm_eps lowercase__ : Optional[Any]= 
use_cache lowercase__ : Union[str, Any]= rescale_embeddings lowercase__ : Union[str, Any]= attention_type lowercase__ : Any= use_bias lowercase__ : List[Any]= block_size lowercase__ : Optional[Any]= num_random_blocks lowercase__ : Optional[int]= classifier_dropout class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" @property def UpperCAmelCase_ ( self ): '''simple docstring''' if self.task == "multiple-choice": lowercase__ : List[Any]= {0: "batch", 1: "choice", 2: "sequence"} else: lowercase__ : Tuple= {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
85
1
"""simple docstring""" from ...processing_utils import ProcessorMixin class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = ["image_processor", "feature_extractor"] __lowerCamelCase = "TvltImageProcessor" __lowerCamelCase = "TvltFeatureExtractor" def __init__( self , snake_case__ , snake_case__ ): '''simple docstring''' super().__init__(image_processor=snake_case__ , feature_extractor=snake_case__ ) lowercase__ : Union[str, Any]= image_processor lowercase__ : int= feature_extractor def __call__( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=False , snake_case__=False , *snake_case__ , **snake_case__ , ): '''simple docstring''' if images is None and audio is None: raise ValueError("You need to specify either an `images` or `audio` input to process." ) lowercase__ : Union[str, Any]= None if images is not None: lowercase__ : Tuple= self.image_processor(snake_case__ , mask_pixel=snake_case__ , *snake_case__ , **snake_case__ ) if images_mixed is not None: lowercase__ : str= self.image_processor(snake_case__ , is_mixed=snake_case__ , *snake_case__ , **snake_case__ ) if audio is not None: lowercase__ : Tuple= self.feature_extractor( snake_case__ , *snake_case__ , sampling_rate=snake_case__ , mask_audio=snake_case__ , **snake_case__ ) lowercase__ : Tuple= {} if audio is not None: output_dict.update(snake_case__ ) if images is not None: output_dict.update(snake_case__ ) if images_mixed_dict is not None: output_dict.update(snake_case__ ) return output_dict @property def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= self.image_processor.model_input_names lowercase__ : Any= self.feature_extractor.model_input_names return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
85
"""simple docstring""" from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
85
1
"""simple docstring""" a : List[Any] = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/""" def lowercase__(A ) ->bytes: """simple docstring""" if not isinstance(A , A ): lowercase__ : Union[str, Any]= f'''a bytes-like object is required, not \'{data.__class__.__name__}\'''' raise TypeError(A ) lowercase__ : str= "".join(bin(A )[2:].zfill(8 ) for byte in data ) lowercase__ : Tuple= len(A ) % 6 != 0 if padding_needed: # The padding that will be added later lowercase__ : Union[str, Any]= b"=" * ((6 - len(A ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(A ) % 6) else: lowercase__ : str= b"" # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(A ) , 6 ) ).encode() + padding ) def lowercase__(A ) ->bytes: """simple docstring""" if not isinstance(A , A ) and not isinstance(A , A ): lowercase__ : str= ( "argument should be a bytes-like object or ASCII string, " f'''not \'{encoded_data.__class__.__name__}\'''' ) raise TypeError(A ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(A , A ): try: lowercase__ : Optional[Any]= encoded_data.decode("utf-8" ) except UnicodeDecodeError: raise ValueError("base64 encoded data should only contain ASCII characters" ) lowercase__ : List[Any]= encoded_data.count("=" ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." 
# Check the padding assert len(A ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one lowercase__ : str= encoded_data[:-padding] lowercase__ : Tuple= "".join( bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: lowercase__ : Tuple= "".join( bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data ) lowercase__ : Any= [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(A ) , 8 ) ] return bytes(A ) if __name__ == "__main__": import doctest doctest.testmod()
85
"""simple docstring""" from unittest.mock import patch import pyspark from datasets.packaged_modules.spark.spark import ( Spark, SparkExamplesIterable, _generate_iterable_examples, ) from ..utils import ( require_dill_gt_0_3_2, require_not_windows, ) def lowercase__(A , A ) ->List[Any]: """simple docstring""" lowercase__ : str= [] for part_id in partition_order: lowercase__ : int= df.where(f'''SPARK_PARTITION_ID() = {part_id}''' ).collect() for row_idx, row in enumerate(A ): expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()) ) return expected_row_ids_and_row_dicts @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->str: """simple docstring""" lowercase__ : Optional[Any]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Tuple= spark.range(100 ).repartition(1 ) lowercase__ : Dict= Spark(A ) # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means # that each partition can hold 2 rows. spark_builder._repartition_df_if_needed(max_shard_size=16 ) # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions. assert spark_builder.df.rdd.getNumPartitions() == 50 @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->Tuple: """simple docstring""" lowercase__ : Union[str, Any]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Dict= spark.range(10 ).repartition(2 ) lowercase__ : Optional[Any]= [1, 0] lowercase__ : List[str]= _generate_iterable_examples(A , A ) # Reverse the partitions. 
lowercase__ : int= _get_expected_row_ids_and_row_dicts_for_partition_order(A , A ) for i, (row_id, row_dict) in enumerate(generate_fn() ): lowercase__, lowercase__ : Any= expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->int: """simple docstring""" lowercase__ : int= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Dict= spark.range(10 ).repartition(1 ) lowercase__ : str= SparkExamplesIterable(A ) assert it.n_shards == 1 for i, (row_id, row_dict) in enumerate(A ): assert row_id == f'''0_{i}''' assert row_dict == {"id": i} @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->str: """simple docstring""" lowercase__ : List[str]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : int= spark.range(30 ).repartition(3 ) # Mock the generator so that shuffle reverses the partition indices. 
with patch("numpy.random.Generator" ) as generator_mock: lowercase__ : Optional[Any]= lambda A : x.reverse() lowercase__ : Tuple= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [2, 1, 0] ) lowercase__ : List[str]= SparkExamplesIterable(A ).shuffle_data_sources(A ) assert shuffled_it.n_shards == 3 for i, (row_id, row_dict) in enumerate(A ): lowercase__, lowercase__ : str= expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->Any: """simple docstring""" lowercase__ : Dict= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Union[str, Any]= spark.range(20 ).repartition(4 ) # Partitions 0 and 2 lowercase__ : Optional[int]= SparkExamplesIterable(A ).shard_data_sources(worker_id=0 , num_workers=2 ) assert shard_it_a.n_shards == 2 lowercase__ : Union[str, Any]= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [0, 2] ) for i, (row_id, row_dict) in enumerate(A ): lowercase__, lowercase__ : Tuple= expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict # Partitions 1 and 3 lowercase__ : Tuple= SparkExamplesIterable(A ).shard_data_sources(worker_id=1 , num_workers=2 ) assert shard_it_a.n_shards == 2 lowercase__ : List[Any]= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [1, 3] ) for i, (row_id, row_dict) in enumerate(A ): lowercase__, lowercase__ : Dict= expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->Tuple: """simple docstring""" lowercase__ : Any= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Tuple= spark.range(100 ).repartition(1 ) lowercase__ : Optional[int]= Spark(A ) # Choose a small max_shard_size for maximum partitioning. 
spark_builder._repartition_df_if_needed(max_shard_size=1 ) # The new number of partitions should not be greater than the number of rows. assert spark_builder.df.rdd.getNumPartitions() == 100
85
1
"""simple docstring""" import jax.numpy as jnp from ...utils import logging from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel from .configuration_mta import MTaConfig a : Optional[Any] = logging.get_logger(__name__) a : Optional[Any] = """T5Config""" def lowercase__(A , A , A ) ->jnp.ndarray: """simple docstring""" lowercase__ : Tuple= jnp.zeros_like(A ) lowercase__ : str= shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] ) lowercase__ : Optional[int]= shifted_input_ids.at[:, 0].set(A ) lowercase__ : Optional[int]= jnp.where(shifted_input_ids == -100 , A , A ) return shifted_input_ids class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = "mt5" __lowerCamelCase = MTaConfig class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = "mt5" __lowerCamelCase = MTaConfig class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = "mt5" __lowerCamelCase = MTaConfig
85
"""simple docstring""" import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=False , snake_case__=False , snake_case__=False , snake_case__=2 , snake_case__=99 , snake_case__=0 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=12 , snake_case__=2 , snake_case__=0.02 , snake_case__=3 , snake_case__=4 , snake_case__="last" , snake_case__=None , snake_case__=None , ): '''simple docstring''' lowercase__ : Optional[int]= parent lowercase__ : Tuple= batch_size lowercase__ : Tuple= seq_length lowercase__ : str= is_training lowercase__ : str= use_input_lengths lowercase__ : Any= use_token_type_ids lowercase__ : List[Any]= use_labels lowercase__ : Optional[int]= gelu_activation lowercase__ : str= sinusoidal_embeddings lowercase__ : List[str]= causal lowercase__ : Any= asm lowercase__ : Optional[int]= n_langs lowercase__ : Union[str, Any]= vocab_size lowercase__ : int= n_special lowercase__ : Any= hidden_size lowercase__ : int= num_hidden_layers lowercase__ : 
List[str]= num_attention_heads lowercase__ : List[str]= hidden_dropout_prob lowercase__ : str= attention_probs_dropout_prob lowercase__ : Any= max_position_embeddings lowercase__ : List[Any]= type_vocab_size lowercase__ : int= type_sequence_label_size lowercase__ : Any= initializer_range lowercase__ : Optional[int]= num_labels lowercase__ : Union[str, Any]= num_choices lowercase__ : List[Any]= summary_type lowercase__ : Optional[int]= use_proj lowercase__ : int= scope def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__ : Dict= random_attention_mask([self.batch_size, self.seq_length] ) lowercase__ : Tuple= None if self.use_input_lengths: lowercase__ : List[Any]= ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length lowercase__ : Tuple= None if self.use_token_type_ids: lowercase__ : Any= ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) lowercase__ : str= None lowercase__ : Tuple= None lowercase__ : Dict= None if self.use_labels: lowercase__ : Optional[Any]= ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ : Optional[Any]= ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase__ : Tuple= ids_tensor([self.batch_size] , 2 ).float() lowercase__ : Tuple= ids_tensor([self.batch_size] , self.num_choices ) lowercase__ : List[Any]= self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def UpperCAmelCase_ ( self ): '''simple docstring''' return FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , 
sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : Any= FlaubertModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : str= model(snake_case__ , lengths=snake_case__ , langs=snake_case__ ) lowercase__ : str= model(snake_case__ , langs=snake_case__ ) lowercase__ : Any= model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : str= FlaubertWithLMHeadModel(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : Optional[Any]= model(snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : str= FlaubertForQuestionAnsweringSimple(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : List[str]= model(snake_case__ ) lowercase__ : Dict= model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) 
) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : List[Any]= FlaubertForQuestionAnswering(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : Dict= model(snake_case__ ) lowercase__ : Any= model( snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , p_mask=snake_case__ , ) lowercase__ : List[str]= model( snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , ) ((lowercase__), ) : Optional[Any]= result_with_labels.to_tuple() lowercase__ : Union[str, Any]= model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ ) ((lowercase__), ) : List[Any]= result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : List[str]= FlaubertForSequenceClassification(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : Optional[Any]= model(snake_case__ ) lowercase__ : Optional[Any]= model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.loss.shape , () ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : List[Any]= self.num_labels lowercase__ : Union[str, Any]= FlaubertForTokenClassification(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : int= model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : int= self.num_choices lowercase__ : str= FlaubertForMultipleChoice(config=snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : Dict= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ : int= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ : str= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ : Any= model( snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[Any]= self.prepare_config_and_inputs() ( ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ) : Any= config_and_inputs lowercase__ : Tuple= { "input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths, "attention_mask": input_mask, } return config, inputs_dict @require_torch class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , 
SCREAMING_SNAKE_CASE__ , unittest.TestCase ): """simple docstring""" __lowerCamelCase = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) __lowerCamelCase = ( { "feature-extraction": FlaubertModel, "fill-mask": FlaubertWithLMHeadModel, "question-answering": FlaubertForQuestionAnsweringSimple, "text-classification": FlaubertForSequenceClassification, "token-classification": FlaubertForTokenClassification, "zero-shot": FlaubertForSequenceClassification, } if is_torch_available() else {} ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__=False ): '''simple docstring''' lowercase__ : Tuple= super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": lowercase__ : List[Any]= torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=snake_case__ ) lowercase__ : List[str]= torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=snake_case__ ) return inputs_dict def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= FlaubertModelTester(self ) lowercase__ : List[str]= ConfigTester(self , config_class=snake_case__ , emb_dim=37 ) def UpperCAmelCase_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Any= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[int]= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Tuple= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[str]= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Union[str, Any]= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*snake_case__ ) def UpperCAmelCase_ ( self ): 
'''simple docstring''' lowercase__ : Tuple= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*snake_case__ ) @slow def UpperCAmelCase_ ( self ): '''simple docstring''' for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : List[str]= FlaubertModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) @slow @require_torch_gpu def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__, lowercase__ : Optional[Any]= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. if model_class == FlaubertForMultipleChoice: return lowercase__ : int= True lowercase__ : List[Any]= model_class(config=snake_case__ ) lowercase__ : str= self._prepare_for_class(snake_case__ , snake_case__ ) lowercase__ : Dict= torch.jit.trace( snake_case__ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(snake_case__ , os.path.join(snake_case__ , "traced_model.pt" ) ) lowercase__ : str= torch.jit.load(os.path.join(snake_case__ , "traced_model.pt" ) , map_location=snake_case__ ) loaded(inputs_dict["input_ids"].to(snake_case__ ) , inputs_dict["attention_mask"].to(snake_case__ ) ) @require_torch class __UpperCAmelCase( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= FlaubertModel.from_pretrained("flaubert/flaubert_base_cased" ) lowercase__ : Tuple= torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) with torch.no_grad(): lowercase__ : Optional[int]= model(snake_case__ )[0] lowercase__ : Optional[int]= torch.Size((1, 11, 768) 
) self.assertEqual(output.shape , snake_case__ ) lowercase__ : Dict= torch.tensor( [[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1e-4 ) )
85
1
"""simple docstring""" import datetime import platform import subprocess from typing import Optional, Tuple, Union import numpy as np def lowercase__(A , A ) ->np.array: """simple docstring""" lowercase__ : List[Any]= f'''{sampling_rate}''' lowercase__ : Union[str, Any]= "1" lowercase__ : Optional[Any]= "f32le" lowercase__ : str= [ "ffmpeg", "-i", "pipe:0", "-ac", ac, "-ar", ar, "-f", format_for_conversion, "-hide_banner", "-loglevel", "quiet", "pipe:1", ] try: with subprocess.Popen(A , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process: lowercase__ : str= ffmpeg_process.communicate(A ) except FileNotFoundError as error: raise ValueError("ffmpeg was not found but is required to load audio files from filename" ) from error lowercase__ : Union[str, Any]= output_stream[0] lowercase__ : Optional[int]= np.frombuffer(A , np.floataa ) if audio.shape[0] == 0: raise ValueError("Malformed soundfile" ) return audio def lowercase__(A , A , A = "f32le" , ) ->Dict: """simple docstring""" lowercase__ : Union[str, Any]= f'''{sampling_rate}''' lowercase__ : Union[str, Any]= "1" if format_for_conversion == "s16le": lowercase__ : Tuple= 2 elif format_for_conversion == "f32le": lowercase__ : List[str]= 4 else: raise ValueError(f'''Unhandled format `{format_for_conversion}`. 
Please use `s16le` or `f32le`''' ) lowercase__ : Tuple= platform.system() if system == "Linux": lowercase__ : Any= "alsa" lowercase__ : str= "default" elif system == "Darwin": lowercase__ : str= "avfoundation" lowercase__ : Optional[Any]= ":0" elif system == "Windows": lowercase__ : Dict= "dshow" lowercase__ : Union[str, Any]= "default" lowercase__ : int= [ "ffmpeg", "-f", format_, "-i", input_, "-ac", ac, "-ar", ar, "-f", format_for_conversion, "-fflags", "nobuffer", "-hide_banner", "-loglevel", "quiet", "pipe:1", ] lowercase__ : List[Any]= int(round(sampling_rate * chunk_length_s ) ) * size_of_sample lowercase__ : str= _ffmpeg_stream(A , A ) for item in iterator: yield item def lowercase__(A , A , A = None , A = None , A = "f32le" , ) ->Optional[int]: """simple docstring""" if stream_chunk_s is not None: lowercase__ : Optional[Any]= stream_chunk_s else: lowercase__ : Optional[Any]= chunk_length_s lowercase__ : Union[str, Any]= ffmpeg_microphone(A , A , format_for_conversion=A ) if format_for_conversion == "s16le": lowercase__ : str= np.intaa lowercase__ : Union[str, Any]= 2 elif format_for_conversion == "f32le": lowercase__ : Optional[Any]= np.floataa lowercase__ : List[str]= 4 else: raise ValueError(f'''Unhandled format `{format_for_conversion}`. 
Please use `s16le` or `f32le`''' ) if stride_length_s is None: lowercase__ : Optional[int]= chunk_length_s / 6 lowercase__ : Union[str, Any]= int(round(sampling_rate * chunk_length_s ) ) * size_of_sample if isinstance(A , (int, float) ): lowercase__ : Optional[int]= [stride_length_s, stride_length_s] lowercase__ : List[Any]= int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample lowercase__ : Union[str, Any]= int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample lowercase__ : Union[str, Any]= datetime.datetime.now() lowercase__ : List[str]= datetime.timedelta(seconds=A ) for item in chunk_bytes_iter(A , A , stride=(stride_left, stride_right) , stream=A ): # Put everything back in numpy scale lowercase__ : Tuple= np.frombuffer(item["raw"] , dtype=A ) lowercase__ : Dict= ( item["stride"][0] // size_of_sample, item["stride"][1] // size_of_sample, ) lowercase__ : Optional[Any]= sampling_rate audio_time += delta if datetime.datetime.now() > audio_time + 10 * delta: # We're late !! 
SKIP continue yield item def lowercase__(A , A , A , A = False ) ->int: """simple docstring""" lowercase__ : int= b"" lowercase__, lowercase__ : List[Any]= stride if stride_left + stride_right >= chunk_len: raise ValueError( f'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' ) lowercase__ : Optional[int]= 0 for raw in iterator: acc += raw if stream and len(A ) < chunk_len: lowercase__ : Union[str, Any]= (_stride_left, 0) yield {"raw": acc[:chunk_len], "stride": stride, "partial": True} else: while len(A ) >= chunk_len: # We are flushing the accumulator lowercase__ : Optional[Any]= (_stride_left, stride_right) lowercase__ : Tuple= {"raw": acc[:chunk_len], "stride": stride} if stream: lowercase__ : int= False yield item lowercase__ : List[Any]= stride_left lowercase__ : str= acc[chunk_len - stride_left - stride_right :] # Last chunk if len(A ) > stride_left: lowercase__ : int= {"raw": acc, "stride": (_stride_left, 0)} if stream: lowercase__ : Any= False yield item def lowercase__(A , A ) ->Optional[int]: """simple docstring""" lowercase__ : Optional[int]= 2**24 # 16Mo try: with subprocess.Popen(A , stdout=subprocess.PIPE , bufsize=A ) as ffmpeg_process: while True: lowercase__ : List[Any]= ffmpeg_process.stdout.read(A ) if raw == b"": break yield raw except FileNotFoundError as error: raise ValueError("ffmpeg was not found but is required to stream audio files from filename" ) from error
85
"""simple docstring""" from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = 42 __lowerCamelCase = 42 __lowerCamelCase = None class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = 2 @register_to_config def __init__( self , snake_case__ = 0.02 , snake_case__ = 100 , snake_case__ = 1.0_07 , snake_case__ = 80 , snake_case__ = 0.05 , snake_case__ = 50 , ): '''simple docstring''' # standard deviation of the initial noise distribution lowercase__ : int= sigma_max # setable values lowercase__ : int= None lowercase__ : np.IntTensor= None lowercase__ : torch.FloatTensor= None # sigma(t_i) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' return sample def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' lowercase__ : List[Any]= num_inference_steps lowercase__ : Any= np.arange(0 , self.num_inference_steps )[::-1].copy() lowercase__ : Tuple= torch.from_numpy(snake_case__ ).to(snake_case__ ) lowercase__ : Union[str, Any]= [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in self.timesteps ] lowercase__ : int= torch.tensor(snake_case__ , dtype=torch.floataa , device=snake_case__ ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ = None ): '''simple docstring''' if self.config.s_min <= sigma <= self.config.s_max: lowercase__ : Optional[Any]= min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 ) else: lowercase__ : str= 0 # sample eps ~ N(0, S_noise^2 * I) lowercase__ : List[Any]= self.config.s_noise * 
randn_tensor(sample.shape , generator=snake_case__ ).to(sample.device ) lowercase__ : str= sigma + gamma * sigma lowercase__ : Any= sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = True , ): '''simple docstring''' lowercase__ : Union[str, Any]= sample_hat + sigma_hat * model_output lowercase__ : Optional[int]= (sample_hat - pred_original_sample) / sigma_hat lowercase__ : Optional[Any]= sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=snake_case__ , derivative=snake_case__ , pred_original_sample=snake_case__ ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = True , ): '''simple docstring''' lowercase__ : int= sample_prev + sigma_prev * model_output lowercase__ : Optional[int]= (sample_prev - pred_original_sample) / sigma_prev lowercase__ : Optional[Any]= sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=snake_case__ , derivative=snake_case__ , pred_original_sample=snake_case__ ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' raise NotImplementedError()
85
1
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class __UpperCAmelCase( unittest.TestCase ): """simple docstring""" def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' for model_result in results.values(): for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"] ): lowercase__ : Optional[Any]= model_result["result"][batch_size][sequence_length] self.assertIsNotNone(snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Union[str, Any]= "sshleifer/tiny-gpt2" lowercase__ : Optional[Any]= PyTorchBenchmarkArguments( models=[MODEL_ID] , training=snake_case__ , inference=snake_case__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case__ , ) lowercase__ : Dict= PyTorchBenchmark(snake_case__ ) lowercase__ : Dict= benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[int]= "sgugger/tiny-distilbert-classification" lowercase__ : Optional[int]= PyTorchBenchmarkArguments( models=[MODEL_ID] , training=snake_case__ , inference=snake_case__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case__ , only_pretrain_model=snake_case__ , ) lowercase__ : List[str]= PyTorchBenchmark(snake_case__ ) lowercase__ : int= benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : str= "sshleifer/tiny-gpt2" lowercase__ : Optional[int]= PyTorchBenchmarkArguments( models=[MODEL_ID] , training=snake_case__ , 
inference=snake_case__ , torchscript=snake_case__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case__ , ) lowercase__ : Dict= PyTorchBenchmark(snake_case__ ) lowercase__ : List[str]= benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(torch_device == "cpu" , "Cant do half precision" ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[Any]= "sshleifer/tiny-gpt2" lowercase__ : Union[str, Any]= PyTorchBenchmarkArguments( models=[MODEL_ID] , training=snake_case__ , inference=snake_case__ , fpaa=snake_case__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case__ , ) lowercase__ : List[str]= PyTorchBenchmark(snake_case__ ) lowercase__ : List[Any]= benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[int]= "sshleifer/tiny-gpt2" lowercase__ : int= AutoConfig.from_pretrained(snake_case__ ) # set architectures equal to `None` lowercase__ : List[str]= None lowercase__ : str= PyTorchBenchmarkArguments( models=[MODEL_ID] , training=snake_case__ , inference=snake_case__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case__ , ) lowercase__ : List[Any]= PyTorchBenchmark(snake_case__ , configs=[config] ) lowercase__ : List[Any]= benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[Any]= "sshleifer/tiny-gpt2" lowercase__ : Optional[Any]= PyTorchBenchmarkArguments( models=[MODEL_ID] , training=snake_case__ , inference=snake_case__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case__ , ) lowercase__ : Dict= PyTorchBenchmark(snake_case__ ) 
lowercase__ : Union[str, Any]= benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) @unittest.skipIf(torch_device == "cpu" , "Can't do half precision" ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : str= "sshleifer/tiny-gpt2" lowercase__ : Union[str, Any]= PyTorchBenchmarkArguments( models=[MODEL_ID] , training=snake_case__ , inference=snake_case__ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=snake_case__ , multi_process=snake_case__ , ) lowercase__ : Optional[Any]= PyTorchBenchmark(snake_case__ ) lowercase__ : str= benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : int= "sshleifer/tiny-gpt2" lowercase__ : str= AutoConfig.from_pretrained(snake_case__ ) lowercase__ : Union[str, Any]= PyTorchBenchmarkArguments( models=[MODEL_ID] , training=snake_case__ , inference=snake_case__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case__ , ) lowercase__ : Any= PyTorchBenchmark(snake_case__ , configs=[config] ) lowercase__ : Tuple= benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Any= "sshleifer/tinier_bart" lowercase__ : str= AutoConfig.from_pretrained(snake_case__ ) lowercase__ : List[Any]= PyTorchBenchmarkArguments( models=[MODEL_ID] , training=snake_case__ , inference=snake_case__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case__ , ) lowercase__ : Tuple= PyTorchBenchmark(snake_case__ , configs=[config] ) lowercase__ : List[Any]= benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCAmelCase_ ( self ): 
'''simple docstring''' lowercase__ : Optional[int]= "sshleifer/tiny-gpt2" lowercase__ : str= AutoConfig.from_pretrained(snake_case__ ) lowercase__ : str= PyTorchBenchmarkArguments( models=[MODEL_ID] , training=snake_case__ , inference=snake_case__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case__ , ) lowercase__ : List[Any]= PyTorchBenchmark(snake_case__ , configs=[config] ) lowercase__ : Tuple= benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Any= "sshleifer/tinier_bart" lowercase__ : Tuple= AutoConfig.from_pretrained(snake_case__ ) lowercase__ : int= PyTorchBenchmarkArguments( models=[MODEL_ID] , training=snake_case__ , inference=snake_case__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case__ , ) lowercase__ : Union[str, Any]= PyTorchBenchmark(snake_case__ , configs=[config] ) lowercase__ : List[str]= benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : int= "sshleifer/tiny-gpt2" with tempfile.TemporaryDirectory() as tmp_dir: lowercase__ : Any= PyTorchBenchmarkArguments( models=[MODEL_ID] , training=snake_case__ , inference=snake_case__ , save_to_csv=snake_case__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(snake_case__ , "inf_time.csv" ) , train_memory_csv_file=os.path.join(snake_case__ , "train_mem.csv" ) , inference_memory_csv_file=os.path.join(snake_case__ , "inf_mem.csv" ) , train_time_csv_file=os.path.join(snake_case__ , "train_time.csv" ) , env_info_csv_file=os.path.join(snake_case__ , "env.csv" ) , multi_process=snake_case__ , ) lowercase__ : Optional[Any]= PyTorchBenchmark(snake_case__ ) benchmark.run() self.assertTrue(Path(os.path.join(snake_case__ , "inf_time.csv" ) 
).exists() ) self.assertTrue(Path(os.path.join(snake_case__ , "train_time.csv" ) ).exists() ) self.assertTrue(Path(os.path.join(snake_case__ , "inf_mem.csv" ) ).exists() ) self.assertTrue(Path(os.path.join(snake_case__ , "train_mem.csv" ) ).exists() ) self.assertTrue(Path(os.path.join(snake_case__ , "env.csv" ) ).exists() ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[int]= "sshleifer/tiny-gpt2" def _check_summary_is_not_empty(snake_case__ ): self.assertTrue(hasattr(snake_case__ , "sequential" ) ) self.assertTrue(hasattr(snake_case__ , "cumulative" ) ) self.assertTrue(hasattr(snake_case__ , "current" ) ) self.assertTrue(hasattr(snake_case__ , "total" ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowercase__ : Any= PyTorchBenchmarkArguments( models=[MODEL_ID] , training=snake_case__ , inference=snake_case__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(snake_case__ , "log.txt" ) , log_print=snake_case__ , trace_memory_line_by_line=snake_case__ , multi_process=snake_case__ , ) lowercase__ : Optional[Any]= PyTorchBenchmark(snake_case__ ) lowercase__ : int= benchmark.run() _check_summary_is_not_empty(result.inference_summary ) _check_summary_is_not_empty(result.train_summary ) self.assertTrue(Path(os.path.join(snake_case__ , "log.txt" ) ).exists() )
85
"""simple docstring""" from ....utils import logging a : List[str] = logging.get_logger(__name__) class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def __init__( self , snake_case__ , snake_case__=None , snake_case__=2048 ): '''simple docstring''' lowercase__ : Dict= config.__dict__ lowercase__ : str= modal_hidden_size if num_labels: lowercase__ : List[str]= num_labels
85
1
"""simple docstring""" from __future__ import annotations from fractions import Fraction def lowercase__(A , A ) ->bool: """simple docstring""" return ( num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den ) def lowercase__(A ) ->list[str]: """simple docstring""" lowercase__ : Tuple= [] lowercase__ : Dict= 11 lowercase__ : str= int("1" + "0" * digit_len ) for num in range(A , A ): while den <= 99: if (num != den) and (num % 10 == den // 10) and (den % 10 != 0): if is_digit_cancelling(A , A ): solutions.append(f'''{num}/{den}''' ) den += 1 num += 1 lowercase__ : str= 10 return solutions def lowercase__(A = 2 ) ->int: """simple docstring""" lowercase__ : Dict= 1.0 for fraction in fraction_list(A ): lowercase__ : Any= Fraction(A ) result *= frac.denominator / frac.numerator return int(A ) if __name__ == "__main__": print(solution())
85
"""simple docstring""" import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def lowercase__(A ) ->int: """simple docstring""" lowercase__ : Optional[int]= [] embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''', f'''stage{idx}.patch_embed.proj.weight''', ) ) embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''', f'''stage{idx}.patch_embed.proj.bias''', ) ) embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''', f'''stage{idx}.patch_embed.norm.weight''', ) ) embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''', f'''stage{idx}.patch_embed.norm.bias''', ) ) return embed def lowercase__(A , A ) ->Any: """simple docstring""" lowercase__ : Any= [] attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''', ) ) attention_weights.append( ( 
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''', ) ) attention_weights.append( ( 
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''', ) ) attention_weights.append( ( 
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj.bias''', ) ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', f'''stage{idx}.blocks.{cnt}.norm1.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', f'''stage{idx}.blocks.{cnt}.norm1.bias''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', f'''stage{idx}.blocks.{cnt}.norm2.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', f'''stage{idx}.blocks.{cnt}.norm2.bias''') ) return attention_weights def lowercase__(A ) ->List[Any]: """simple 
docstring""" lowercase__ : Dict= [] token.append((f'''cvt.encoder.stages.{idx}.cls_token''', "stage2.cls_token") ) return token def lowercase__() ->Union[str, Any]: """simple docstring""" lowercase__ : Dict= [] head.append(("layernorm.weight", "norm.weight") ) head.append(("layernorm.bias", "norm.bias") ) head.append(("classifier.weight", "head.weight") ) head.append(("classifier.bias", "head.bias") ) return head def lowercase__(A , A , A , A ) ->Optional[int]: """simple docstring""" lowercase__ : List[str]= "imagenet-1k-id2label.json" lowercase__ : List[str]= 1_000 lowercase__ : Tuple= "huggingface/label-files" lowercase__ : int= num_labels lowercase__ : int= json.load(open(cached_download(hf_hub_url(A , A , repo_type="dataset" ) ) , "r" ) ) lowercase__ : str= {int(A ): v for k, v in idalabel.items()} lowercase__ : Optional[int]= idalabel lowercase__ : Union[str, Any]= {v: k for k, v in idalabel.items()} lowercase__ : Tuple= CvtConfig(num_labels=A , idalabel=A , labelaid=A ) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit("/" , 1 )[-1][4:6] == "13": lowercase__ : int= [1, 2, 10] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit("/" , 1 )[-1][4:6] == "21": lowercase__ : Union[str, Any]= [1, 4, 16] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: lowercase__ : Optional[Any]= [2, 2, 20] lowercase__ : Optional[Any]= [3, 12, 16] lowercase__ : List[str]= [192, 768, 1_024] lowercase__ : List[str]= CvtForImageClassification(A ) lowercase__ : Any= AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" ) lowercase__ : Dict= image_size lowercase__ : int= torch.load(A , map_location=torch.device("cpu" ) ) lowercase__ : Optional[Any]= OrderedDict() lowercase__ : Tuple= [] for idx in range(len(config.depth ) ): if config.cls_token[idx]: lowercase__ : Optional[int]= list_of_state_dict + cls_token(A ) lowercase__ : List[str]= list_of_state_dict + embeddings(A ) for cnt in range(config.depth[idx] ): lowercase__ : Dict= 
list_of_state_dict + attention(A , A ) lowercase__ : Optional[Any]= list_of_state_dict + final() for gg in list_of_state_dict: print(A ) for i in range(len(A ) ): lowercase__ : str= original_weights[list_of_state_dict[i][1]] model.load_state_dict(A ) model.save_pretrained(A ) image_processor.save_pretrained(A ) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": a : Optional[int] = argparse.ArgumentParser() parser.add_argument( """--cvt_model""", default="""cvt-w24""", type=str, help="""Name of the cvt model you'd like to convert.""", ) parser.add_argument( """--image_size""", default=384, type=int, help="""Input Image Size""", ) parser.add_argument( """--cvt_file_name""", default=r"""cvtmodels\CvT-w24-384x384-IN-22k.pth""", type=str, help="""Input Image Size""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) a : Optional[int] = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
85
1
"""simple docstring""" import string import numpy def lowercase__(A , A ) ->int: """simple docstring""" return b if a == 0 else greatest_common_divisor(b % a , A ) class __UpperCAmelCase: """simple docstring""" __lowerCamelCase = string.ascii_uppercase + string.digits # This cipher takes alphanumerics into account # i.e. a total of 36 characters # take x and return x % len(key_string) __lowerCamelCase = numpy.vectorize(lambda SCREAMING_SNAKE_CASE__ : x % 36 ) __lowerCamelCase = numpy.vectorize(SCREAMING_SNAKE_CASE__ ) def __init__( self , snake_case__ ): '''simple docstring''' lowercase__ : Dict= self.modulus(snake_case__ ) # mod36 calc's on the encrypt key self.check_determinant() # validate the determinant of the encryption key lowercase__ : Tuple= encrypt_key.shape[0] def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' return self.key_string.index(snake_case__ ) def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' return self.key_string[round(snake_case__ )] def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[Any]= round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: lowercase__ : List[Any]= det % len(self.key_string ) lowercase__ : Dict= len(self.key_string ) if greatest_common_divisor(snake_case__ , len(self.key_string ) ) != 1: lowercase__ : Optional[Any]= ( F'''determinant modular {req_l} of encryption key({det}) ''' F'''is not co prime w.r.t {req_l}.\nTry another key.''' ) raise ValueError(snake_case__ ) def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' lowercase__ : List[str]= [char for char in text.upper() if char in self.key_string] lowercase__ : str= chars[-1] while len(snake_case__ ) % self.break_key != 0: chars.append(snake_case__ ) return "".join(snake_case__ ) def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' lowercase__ : Any= self.process_text(text.upper() ) lowercase__ : Tuple= "" for i in range(0 , len(snake_case__ ) - self.break_key + 1 , 
self.break_key ): lowercase__ : Optional[int]= text[i : i + self.break_key] lowercase__ : List[str]= [self.replace_letters(snake_case__ ) for char in batch] lowercase__ : Optional[int]= numpy.array([vec] ).T lowercase__ : List[str]= self.modulus(self.encrypt_key.dot(snake_case__ ) ).T.tolist()[ 0 ] lowercase__ : List[str]= "".join( self.replace_digits(snake_case__ ) for num in batch_encrypted ) encrypted += encrypted_batch return encrypted def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : int= round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: lowercase__ : Optional[Any]= det % len(self.key_string ) lowercase__ : Dict= None for i in range(len(self.key_string ) ): if (det * i) % len(self.key_string ) == 1: lowercase__ : Tuple= i break lowercase__ : List[str]= ( det_inv * numpy.linalg.det(self.encrypt_key ) * numpy.linalg.inv(self.encrypt_key ) ) return self.to_int(self.modulus(snake_case__ ) ) def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' lowercase__ : str= self.make_decrypt_key() lowercase__ : int= self.process_text(text.upper() ) lowercase__ : Union[str, Any]= "" for i in range(0 , len(snake_case__ ) - self.break_key + 1 , self.break_key ): lowercase__ : Optional[Any]= text[i : i + self.break_key] lowercase__ : Union[str, Any]= [self.replace_letters(snake_case__ ) for char in batch] lowercase__ : Optional[int]= numpy.array([vec] ).T lowercase__ : Union[str, Any]= self.modulus(decrypt_key.dot(snake_case__ ) ).T.tolist()[0] lowercase__ : Optional[int]= "".join( self.replace_digits(snake_case__ ) for num in batch_decrypted ) decrypted += decrypted_batch return decrypted def lowercase__() ->None: """simple docstring""" lowercase__ : Optional[int]= int(input("Enter the order of the encryption key: " ) ) lowercase__ : Tuple= [] print("Enter each row of the encryption key with space separated integers" ) for _ in range(A ): lowercase__ : Optional[Any]= [int(A ) for x in input().split()] hill_matrix.append(A ) lowercase__ : 
Union[str, Any]= HillCipher(numpy.array(A ) ) print("Would you like to encrypt or decrypt some text? (1 or 2)" ) lowercase__ : List[Any]= input("\n1. Encrypt\n2. Decrypt\n" ) if option == "1": lowercase__ : Optional[int]= input("What text would you like to encrypt?: " ) print("Your encrypted text is:" ) print(hc.encrypt(A ) ) elif option == "2": lowercase__ : Dict= input("What text would you like to decrypt?: " ) print("Your decrypted text is:" ) print(hc.decrypt(A ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
85
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import ScoreSdeVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = 42 __lowerCamelCase = 42 def __init__( self , snake_case__ , snake_case__ ): '''simple docstring''' super().__init__() self.register_modules(unet=snake_case__ , scheduler=snake_case__ ) @torch.no_grad() def __call__( self , snake_case__ = 1 , snake_case__ = 2000 , snake_case__ = None , snake_case__ = "pil" , snake_case__ = True , **snake_case__ , ): '''simple docstring''' lowercase__ : Optional[Any]= self.unet.config.sample_size lowercase__ : Dict= (batch_size, 3, img_size, img_size) lowercase__ : List[Any]= self.unet lowercase__ : Tuple= randn_tensor(snake_case__ , generator=snake_case__ ) * self.scheduler.init_noise_sigma lowercase__ : Tuple= sample.to(self.device ) self.scheduler.set_timesteps(snake_case__ ) self.scheduler.set_sigmas(snake_case__ ) for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): lowercase__ : Optional[Any]= self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device ) # correction step for _ in range(self.scheduler.config.correct_steps ): lowercase__ : List[Any]= self.unet(snake_case__ , snake_case__ ).sample lowercase__ : List[Any]= self.scheduler.step_correct(snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample # prediction step lowercase__ : List[str]= model(snake_case__ , snake_case__ ).sample lowercase__ : Tuple= self.scheduler.step_pred(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ ) lowercase__, lowercase__ : Tuple= output.prev_sample, output.prev_sample_mean lowercase__ : List[str]= sample_mean.clamp(0 , 1 ) lowercase__ : Union[str, Any]= sample.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowercase__ : 
str= self.numpy_to_pil(snake_case__ ) if not return_dict: return (sample,) return ImagePipelineOutput(images=snake_case__ )
85
1
"""simple docstring""" def lowercase__(A ) ->list[int]: """simple docstring""" lowercase__ : List[str]= len(A ) for i in range(A ): for j in range(i + 1 , A ): if numbers[j] < numbers[i]: lowercase__, lowercase__ : List[str]= numbers[j], numbers[i] return numbers if __name__ == "__main__": a : Dict = input("""Enter numbers separated by a comma:\n""").strip() a : List[str] = [int(item) for item in user_input.split(""",""")] print(exchange_sort(unsorted))
85
"""simple docstring""" def lowercase__(A ) ->list[int]: """simple docstring""" lowercase__ : List[str]= len(A ) for i in range(A ): for j in range(i + 1 , A ): if numbers[j] < numbers[i]: lowercase__, lowercase__ : List[str]= numbers[j], numbers[i] return numbers if __name__ == "__main__": a : Dict = input("""Enter numbers separated by a comma:\n""").strip() a : List[str] = [int(item) for item in user_input.split(""",""")] print(exchange_sort(unsorted))
85
1
"""simple docstring""" from collections import Counter from pathlib import Path from typing import Optional, Tuple import yaml class __UpperCAmelCase( yaml.SafeLoader ): """simple docstring""" def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' lowercase__ : List[Any]= [self.constructed_objects[key_node] for key_node, _ in node.value] lowercase__ : str= [tuple(snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else key for key in keys] lowercase__ : str= Counter(snake_case__ ) lowercase__ : Tuple= [key for key in counter if counter[key] > 1] if duplicate_keys: raise TypeError(F'''Got duplicate yaml keys: {duplicate_keys}''' ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__=False ): '''simple docstring''' lowercase__ : List[Any]= super().construct_mapping(snake_case__ , deep=snake_case__ ) self._check_no_duplicates_on_constructed_node(snake_case__ ) return mapping def lowercase__(A ) ->Tuple[Optional[str], str]: """simple docstring""" lowercase__ : List[Any]= list(readme_content.splitlines() ) if full_content and full_content[0] == "---" and "---" in full_content[1:]: lowercase__ : Tuple= full_content[1:].index("---" ) + 1 lowercase__ : str= "\n".join(full_content[1:sep_idx] ) return yamlblock, "\n".join(full_content[sep_idx + 1 :] ) return None, "\n".join(A ) class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = {"train_eval_index"} # train-eval-index in the YAML metadata @classmethod def UpperCAmelCase_ ( cls , snake_case__ ): '''simple docstring''' with open(snake_case__ , encoding="utf-8" ) as readme_file: lowercase__, lowercase__ : Any= _split_yaml_from_readme(readme_file.read() ) if yaml_string is not None: return cls.from_yaml_string(snake_case__ ) else: return cls() def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' if path.exists(): with open(snake_case__ , encoding="utf-8" ) as readme_file: lowercase__ : str= readme_file.read() else: lowercase__ : Any= None lowercase__ 
: List[Any]= self._to_readme(snake_case__ ) with open(snake_case__ , "w" , encoding="utf-8" ) as readme_file: readme_file.write(snake_case__ ) def UpperCAmelCase_ ( self , snake_case__ = None ): '''simple docstring''' if readme_content is not None: lowercase__, lowercase__ : Optional[Any]= _split_yaml_from_readme(snake_case__ ) lowercase__ : List[str]= "---\n" + self.to_yaml_string() + "---\n" + content else: lowercase__ : List[Any]= "---\n" + self.to_yaml_string() + "---\n" return full_content @classmethod def UpperCAmelCase_ ( cls , snake_case__ ): '''simple docstring''' lowercase__ : str= yaml.load(snake_case__ , Loader=_NoDuplicateSafeLoader ) or {} # Convert the YAML keys to DatasetMetadata fields lowercase__ : Optional[Any]= { (key.replace("-" , "_" ) if key.replace("-" , "_" ) in cls._FIELDS_WITH_DASHES else key): value for key, value in metadata_dict.items() } return cls(**snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' return yaml.safe_dump( { (key.replace("_" , "-" ) if key in self._FIELDS_WITH_DASHES else key): value for key, value in self.items() } , sort_keys=snake_case__ , allow_unicode=snake_case__ , encoding="utf-8" , ).decode("utf-8" ) a : Union[str, Any] = { """image-classification""": [], """translation""": [], """image-segmentation""": [], """fill-mask""": [], """automatic-speech-recognition""": [], """token-classification""": [], """sentence-similarity""": [], """audio-classification""": [], """question-answering""": [], """summarization""": [], """zero-shot-classification""": [], """table-to-text""": [], """feature-extraction""": [], """other""": [], """multiple-choice""": [], """text-classification""": [], """text-to-image""": [], """text2text-generation""": [], """zero-shot-image-classification""": [], """tabular-classification""": [], """tabular-regression""": [], """image-to-image""": [], """tabular-to-text""": [], """unconditional-image-generation""": [], """text-retrieval""": [], """text-to-speech""": [], 
"""object-detection""": [], """audio-to-audio""": [], """text-generation""": [], """conversational""": [], """table-question-answering""": [], """visual-question-answering""": [], """image-to-text""": [], """reinforcement-learning""": [], """voice-activity-detection""": [], """time-series-forecasting""": [], """document-question-answering""": [], } if __name__ == "__main__": from argparse import ArgumentParser a : Union[str, Any] = ArgumentParser(usage="""Validate the yaml metadata block of a README.md file.""") ap.add_argument("""readme_filepath""") a : Any = ap.parse_args() a : Any = Path(args.readme_filepath) a : Dict = DatasetMetadata.from_readme(readme_filepath) print(dataset_metadata) dataset_metadata.to_readme(readme_filepath)
85
"""simple docstring""" import math from collections.abc import Iterator from itertools import takewhile def lowercase__(A ) ->bool: """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(A ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def lowercase__() ->Iterator[int]: """simple docstring""" lowercase__ : Union[str, Any]= 2 while True: if is_prime(A ): yield num num += 1 def lowercase__(A = 2_000_000 ) ->int: """simple docstring""" return sum(takewhile(lambda A : x < n , prime_generator() ) ) if __name__ == "__main__": print(F"""{solution() = }""")
85
1
"""Unconditional image generation with the score-based SDE-VE sampler."""
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class __UpperCAmelCase(DiffusionPipeline):
    """Pipeline for unconditional image generation using the SDE-VE scheme.

    Fixes: the base class was the undefined name ``SCREAMING_SNAKE_CASE__``
    (``DiffusionPipeline`` is the only pipeline base imported here); the two
    ``__lowerCamelCase = 42`` class attributes were meaningless duplicate
    assignments, restored as the component annotations; and ``__call__``
    declared several parameters all named ``snake_case__`` (a SyntaxError)
    while its body read undefined locals — conventional names are restored
    from the data flow of the original statements.
    """

    # Pipeline components registered in __init__.
    unet: UNetaDModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        # Make the denoising model and the SDE-VE scheduler available as
        # self.unet / self.scheduler and include them in save/load.
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator=None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        """Sample *batch_size* images by integrating the reverse SDE.

        Returns an ImagePipelineOutput (or a 1-tuple when return_dict=False).
        """
        img_size = self.unet.config.sample_size
        # NOTE(review): channel count 3 is hard-coded, as in the original.
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet

        # Start from Gaussian noise scaled to the scheduler's initial sigma.
        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # Correction step: Langevin-style updates at the current noise level.
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # Prediction step: advance the reverse-time SDE by one timestep.
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)
            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        # Use the noise-free mean of the final step as the image estimate.
        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample)
85
"""Project Euler problem 44: pentagonal numbers whose sum and difference are pentagonal."""


def is_pentagonal(n: int) -> bool:
    """Return True if *n* is a pentagonal number.

    n is pentagonal iff (1 + sqrt(1 + 24n)) / 6 is a positive integer.
    """
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5_000) -> int:
    """Return the smallest D = P_j - P_i with P_i + P_j and P_j - P_i both
    pentagonal, searching the first *limit* - 1 pentagonal numbers.

    Returns -1 if no such pair exists within the search window.

    Fixes: both functions were named ``lowercase__`` (the second shadowed the
    first, and the call to ``is_pentagonal`` did not resolve), and the inner
    loop was ``range(A, len(A))`` over the integer argument — restored to
    iterate indices ``j >= i`` of the precomputed list.
    """
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(f"""{solution() = }""")
85
1
"""Pytest configuration for the SageMaker integration tests."""
import os
import pytest
from attr import dataclass

# Default AWS region used by the SageMaker jobs.
a : Dict = """us-east-1"""  # defaults region


@dataclass
class __UpperCAmelCase:
    """Describes one SageMaker training-test environment: execution role,
    hyperparameters, metric regexes, script path and training-image URI.

    NOTE(review): this file looks name-garbled. The repeated
    ``__lowerCamelCase`` assignments were presumably distinct attrs fields
    (the properties read ``self.framework``, which nothing here defines, and
    the last assignment reads an undefined ``hyperparameters``), and the four
    identically named properties shadow one another. TODO confirm against the
    original source before relying on this class.
    """

    __lowerCamelCase = 42
    # IAM role assumed by the SageMaker training job.
    __lowerCamelCase = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    # Default GLUE/MNLI fine-tuning hyperparameters for a single-device run.
    __lowerCamelCase = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5_500,
    }
    # Distributed variant: same settings but more steps.
    __lowerCamelCase = {**hyperparameters, "max_steps": 1_000}

    @property
    def UpperCAmelCase_(self):
        """Return CloudWatch metric-extraction regexes for the framework.

        PyTorch logs explicit eval_* metrics; the TF branch scrapes
        loss/accuracy from the Keras progress output.
        """
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def UpperCAmelCase_(self):
        """Return the base job name for the SageMaker run."""
        return F'''{self.framework}-transfromers-test'''

    @property
    def UpperCAmelCase_(self):
        """Return the local path of the framework-specific test scripts."""
        return F'''./tests/sagemaker/scripts/{self.framework}'''

    @property
    def UpperCAmelCase_(self):
        """Return the ECR Deep Learning Container image URI for the framework."""
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"


@pytest.fixture(scope="class")
def lowercase__(A) -> List[Any]:
    """Class-scoped fixture that builds a test environment for the requesting
    test class.

    NOTE(review): the parameter is named ``A`` but the body reads ``request``,
    and ``SageMakerTestEnvironment`` is not defined in this file — both appear
    to be garbled names (likely ``request`` and the dataclass above). TODO
    confirm.
    """
    lowercase__ : Tuple = SageMakerTestEnvironment(framework=request.cls.framework)
85
"""Configuration classes for the Pix2Struct model family (text decoder,
vision encoder, and the combined encoder-decoder config).

NOTE(review): this module is name-garbled. All three classes share the name
``__UpperCAmelCase`` (later definitions shadow earlier ones), the base class
``SCREAMING_SNAKE_CASE__`` is undefined (the import list suggests
``PretrainedConfig``), the ``__init__`` signatures declare many parameters all
named ``snake_case__`` (a SyntaxError), and the bodies read undefined names
(``vocab_size``, ``config_dict``, ``PixaStructTextConfig``, ...). Comments
below describe the evident intent; TODO confirm against the original file.
"""
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging

a : List[str] = logging.get_logger(__name__)

# Canonical checkpoint name -> hosted config URL.
a : Union[str, Any] = {
    """google/pix2struct-textcaps-base""": (
        """https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"""
    ),
}


class __UpperCAmelCase(SCREAMING_SNAKE_CASE__):
    """Configuration of the Pix2Struct text (decoder) model."""

    __lowerCamelCase = "pix2struct_text_model"
    __lowerCamelCase = ["past_key_values"]
    # Maps common config attribute names to this config's field names.
    __lowerCamelCase = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        snake_case__=50244,
        snake_case__=768,
        snake_case__=64,
        snake_case__=2048,
        snake_case__=12,
        snake_case__=12,
        snake_case__=32,
        snake_case__=128,
        snake_case__=0.1,
        snake_case__=1e-6,
        snake_case__=1.0,
        snake_case__="gelu_new",
        snake_case__=0,
        snake_case__=False,
        snake_case__=0,
        snake_case__=1,
        snake_case__=False,
        snake_case__=True,
        **snake_case__,
    ):
        """Store the decoder hyperparameters and forward the special-token and
        decoder flags to the base config."""
        lowercase__ : int = vocab_size
        lowercase__ : Optional[Any] = hidden_size
        lowercase__ : Tuple = d_kv
        lowercase__ : Optional[int] = d_ff
        lowercase__ : Any = num_layers
        lowercase__ : Dict = num_heads
        lowercase__ : List[Any] = relative_attention_num_buckets
        lowercase__ : Optional[Any] = relative_attention_max_distance
        lowercase__ : Dict = dropout_rate
        lowercase__ : Tuple = layer_norm_epsilon
        lowercase__ : str = initializer_factor
        lowercase__ : Any = use_cache
        lowercase__ : Optional[int] = eos_token_id
        lowercase__ : str = decoder_start_token_id
        # for backwards compatibility
        lowercase__ : Optional[Any] = dense_act_fn
        super().__init__(
            pad_token_id=snake_case__,
            eos_token_id=snake_case__,
            decoder_start_token_id=snake_case__,
            tie_word_embeddings=snake_case__,
            is_decoder=snake_case__,
            **snake_case__,
        )

    @classmethod
    def UpperCAmelCase_(cls, snake_case__, **snake_case__):
        """Build this config from a pretrained checkpoint; when loading from a
        combined pix2struct config, extract its ``text_config`` sub-dict."""
        cls._set_token_in_kwargs(snake_case__)
        lowercase__, lowercase__ : str = cls.get_config_dict(snake_case__, **snake_case__)
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            lowercase__ : str = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.'''
            )
        return cls.from_dict(snake_case__, **snake_case__)


class __UpperCAmelCase(SCREAMING_SNAKE_CASE__):
    """Configuration of the Pix2Struct vision (encoder) model."""

    __lowerCamelCase = "pix2struct_vision_model"

    def __init__(
        self,
        snake_case__=768,
        snake_case__=768,
        snake_case__=2048,
        snake_case__=64,
        snake_case__=12,
        snake_case__=12,
        snake_case__="gelu_new",
        snake_case__=1e-6,
        snake_case__=0.0,
        snake_case__=0.0,
        snake_case__=1e-10,
        snake_case__=1.0,
        snake_case__=4096,
        snake_case__=32,
        snake_case__=128,
        **snake_case__,
    ):
        """Store the vision-encoder hyperparameters."""
        super().__init__(**snake_case__)
        lowercase__ : Tuple = hidden_size
        lowercase__ : Tuple = patch_embed_hidden_size
        lowercase__ : Optional[Any] = d_ff
        lowercase__ : Dict = dropout_rate
        lowercase__ : Any = num_hidden_layers
        lowercase__ : Optional[int] = num_attention_heads
        lowercase__ : Dict = initializer_range
        lowercase__ : Tuple = initializer_factor
        lowercase__ : Tuple = attention_dropout
        lowercase__ : Optional[Any] = layer_norm_eps
        lowercase__ : List[Any] = dense_act_fn
        lowercase__ : str = seq_len
        lowercase__ : List[str] = relative_attention_num_buckets
        lowercase__ : Union[str, Any] = relative_attention_max_distance
        lowercase__ : Dict = d_kv

    @classmethod
    def UpperCAmelCase_(cls, snake_case__, **snake_case__):
        """Build this config from a pretrained checkpoint; when loading from a
        combined pix2struct config, extract its ``vision_config`` sub-dict."""
        cls._set_token_in_kwargs(snake_case__)
        lowercase__, lowercase__ : int = cls.get_config_dict(snake_case__, **snake_case__)
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            lowercase__ : Union[str, Any] = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.'''
            )
        return cls.from_dict(snake_case__, **snake_case__)


class __UpperCAmelCase(SCREAMING_SNAKE_CASE__):
    """Combined Pix2Struct configuration wrapping a text and a vision config."""

    __lowerCamelCase = "pix2struct"
    __lowerCamelCase = True

    def __init__(
        self,
        snake_case__=None,
        snake_case__=None,
        snake_case__=1.0,
        snake_case__=0.02,
        snake_case__=False,
        snake_case__=False,
        snake_case__=True,
        **snake_case__,
    ):
        """Build the combined config, defaulting the text and vision sub-configs
        when they are not provided, and mirror the decoder's special tokens."""
        super().__init__(tie_word_embeddings=snake_case__, is_encoder_decoder=snake_case__, **snake_case__)
        if text_config is None:
            lowercase__ : List[Any] = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")
        if vision_config is None:
            lowercase__ : str = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")
        lowercase__ : str = PixaStructTextConfig(**snake_case__)
        lowercase__ : Dict = PixaStructVisionConfig(**snake_case__)
        # Mirror the decoder's special-token ids at the top level.
        lowercase__ : int = self.text_config.decoder_start_token_id
        lowercase__ : List[Any] = self.text_config.pad_token_id
        lowercase__ : Any = self.text_config.eos_token_id
        lowercase__ : Any = initializer_factor
        lowercase__ : int = initializer_range
        lowercase__ : List[str] = self.initializer_range
        lowercase__ : List[str] = self.initializer_range
        lowercase__ : Dict = is_vqa

    @classmethod
    def UpperCAmelCase_(cls, snake_case__, snake_case__, **snake_case__):
        """Alternate constructor from already-built text and vision configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **snake_case__)

    def UpperCAmelCase_(self):
        """Serialize this config (including nested sub-configs) to a dict."""
        lowercase__ : Union[str, Any] = copy.deepcopy(self.__dict__)
        lowercase__ : str = self.text_config.to_dict()
        lowercase__ : str = self.vision_config.to_dict()
        lowercase__ : List[str] = self.__class__.model_type
        return output
85
1
"""Iterative and recursive implementations of the Euclidean GCD algorithm."""


def euclidean_gcd(a: int, b: int) -> int:
    """Return the greatest common divisor of *a* and *b* (iterative).

    Fix: the original declared duplicate parameters ``(A, A)`` (a SyntaxError)
    and the loop body assigned both swap values to one throwaway name, so
    ``a``/``b`` were never updated and the loop could not terminate.
    """
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Return the greatest common divisor of *a* and *b* (recursive).

    Base case: gcd(a, 0) == a; otherwise recurse on (b, a mod b).
    """
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    """Print a few worked examples of both implementations."""
    print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}""")
    print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}""")
    print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}""")
    print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}""")
    print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}""")
    print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}""")
    print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}""")
    print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}""")
    print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}""")
    print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}""")


if __name__ == "__main__":
    main()
85
"""Slow integration test comparing TF-MT5's loss on a tiny input against a
stored reference value."""
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow

if is_tf_available():
    import tensorflow as tf

    from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM


@require_tf
@require_sentencepiece
@require_tokenizers
class __UpperCAmelCase(unittest.TestCase):
    """Integration test for the TF MT5 model.

    NOTE(review): local names are garbled — every assignment targets
    ``lowercase__`` while later statements read ``tokenizer``, ``model``,
    ``mtf_score`` and ``EXPECTED_SCORE``, which this code no longer defines.
    TODO confirm against the original test.
    """

    @slow
    def UpperCAmelCase_(self):
        """Run google/mt5-small on one example pair and check the mean negative
        log-likelihood against the recorded value (-21.228168) to 2e-4."""
        lowercase__ : str = TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small")
        lowercase__ : str = AutoTokenizer.from_pretrained("google/mt5-small")
        lowercase__ : Tuple = tokenizer("Hello there", return_tensors="tf").input_ids
        lowercase__ : Optional[Any] = tokenizer("Hi I am", return_tensors="tf").input_ids
        lowercase__ : Optional[Any] = model(snake_case__, labels=snake_case__).loss
        lowercase__ : int = -tf.math.reduce_mean(snake_case__).numpy()
        lowercase__ : int = -21.22_81_68
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
85
1
"""Unit tests for the PhoBERT tokenizer (Vietnamese BPE tokenizer)."""
import os
import unittest

from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class __UpperCAmelCase(SCREAMING_SNAKE_CASE__, unittest.TestCase):
    """Tokenizer test case for PhoBERT.

    NOTE(review): the first base class ``SCREAMING_SNAKE_CASE__`` is undefined
    in this file (the import suggests ``TokenizerTesterMixin``), and local
    names are garbled — assignments target ``lowercase__`` while later
    statements read ``vocab_tokens``, ``merges``, ``tokens`` etc. TODO confirm
    against the original test.
    """

    __lowerCamelCase = PhobertTokenizer
    __lowerCamelCase = False

    def UpperCAmelCase_(self):
        """Write a tiny vocab and merges file into the temp dir so a real
        PhobertTokenizer can be constructed for the tests."""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        lowercase__ : Optional[Any] = ["T@@", "i", "I", "R@@", "r", "e@@"]
        lowercase__ : Dict = dict(zip(snake_case__, range(len(snake_case__))))
        lowercase__ : Tuple = ["#version: 0.2", "l à</w>"]
        lowercase__ : Union[str, Any] = {"unk_token": "<unk>"}
        lowercase__ : Optional[Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        lowercase__ : str = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(F'''{token} {vocab_tokens[token]}\n''')
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(snake_case__))

    def UpperCAmelCase_(self, **snake_case__):
        """Return a PhobertTokenizer loaded from the temp dir, with the test's
        special-token map applied."""
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **snake_case__)

    def UpperCAmelCase_(self, snake_case__):
        """Return an (input_text, expected_output_text) pair for the common
        tokenizer tests."""
        lowercase__ : Optional[Any] = "Tôi là VinAI Research"
        lowercase__ : Union[str, Any] = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text

    def UpperCAmelCase_(self):
        """Check BPE tokenization of a Vietnamese sentence and the resulting
        token-to-id conversion (unknown pieces map to the <unk> id)."""
        lowercase__ : List[Any] = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        lowercase__ : Dict = "Tôi là VinAI Research"
        lowercase__ : Optional[int] = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        lowercase__ : Dict = tokenizer.tokenize(snake_case__)
        print(snake_case__)
        self.assertListEqual(snake_case__, snake_case__)
        lowercase__ : Union[str, Any] = tokens + [tokenizer.unk_token]
        lowercase__ : Tuple = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__), snake_case__)
85
"""Processor for BridgeTower: wraps an image processor and a Roberta tokenizer
behind a single callable interface."""
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class __UpperCAmelCase(SCREAMING_SNAKE_CASE__):
    """BridgeTower processor combining image preprocessing and tokenization.

    NOTE(review): the base class ``SCREAMING_SNAKE_CASE__`` is undefined here
    (the import suggests ``ProcessorMixin``), and ``__call__`` declares many
    parameters all garbled to ``snake_case__`` — a SyntaxError as written; the
    original presumably had distinct tokenizer/image-processor kwargs
    (text, images, add_special_tokens, padding, truncation, max_length, ...).
    TODO confirm against the original file.
    """

    # Attributes ProcessorMixin uses to save/load the two sub-components.
    __lowerCamelCase = ["image_processor", "tokenizer"]
    __lowerCamelCase = "BridgeTowerImageProcessor"
    __lowerCamelCase = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, snake_case__, snake_case__):
        """Store the image processor and tokenizer on the processor."""
        super().__init__(snake_case__, snake_case__)

    def __call__(
        self,
        snake_case__,
        snake_case__=None,
        snake_case__=True,
        snake_case__=False,
        snake_case__=None,
        snake_case__=None,
        snake_case__=0,
        snake_case__=None,
        snake_case__=None,
        snake_case__=None,
        snake_case__=False,
        snake_case__=False,
        snake_case__=False,
        snake_case__=False,
        snake_case__=True,
        snake_case__=None,
        **snake_case__,
    ):
        """Tokenize the text, preprocess the images, and merge both outputs
        (pixel_values + pixel_mask joined into the text encoding)."""
        lowercase__ : Optional[int] = self.tokenizer(
            text=snake_case__,
            add_special_tokens=snake_case__,
            padding=snake_case__,
            truncation=snake_case__,
            max_length=snake_case__,
            stride=snake_case__,
            pad_to_multiple_of=snake_case__,
            return_token_type_ids=snake_case__,
            return_attention_mask=snake_case__,
            return_overflowing_tokens=snake_case__,
            return_special_tokens_mask=snake_case__,
            return_offsets_mapping=snake_case__,
            return_length=snake_case__,
            verbose=snake_case__,
            return_tensors=snake_case__,
            **snake_case__,
        )
        # add pixel_values + pixel_mask
        lowercase__ : Optional[int] = self.image_processor(
            snake_case__, return_tensors=snake_case__, do_normalize=snake_case__, do_center_crop=snake_case__, **snake_case__
        )
        encoding.update(snake_case__)
        return encoding

    def UpperCAmelCase_(self, *snake_case__, **snake_case__):
        """Forward batch_decode to the tokenizer."""
        return self.tokenizer.batch_decode(*snake_case__, **snake_case__)

    def UpperCAmelCase_(self, *snake_case__, **snake_case__):
        """Forward decode to the tokenizer."""
        return self.tokenizer.decode(*snake_case__, **snake_case__)

    @property
    def UpperCAmelCase_(self):
        """Return the union of tokenizer and image-processor input names,
        de-duplicated while preserving order."""
        lowercase__ : Optional[Any] = self.tokenizer.model_input_names
        lowercase__ : List[Any] = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
85
1
"""Project Euler problem 6: difference between the square of the sum and the
sum of the squares of the first n natural numbers."""


def solution(n: int = 100) -> int:
    """Return (1 + 2 + ... + n)**2 - (1**2 + 2**2 + ... + n**2).

    Fix: the original signature named its parameter ``A`` while the loop read
    an undefined ``n``, and the ``__main__`` guard called an undefined
    ``solution`` — the consistent original names are restored.
    """
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares


if __name__ == "__main__":
    print(f"""{solution() = }""")
85
"""Tests for the RAG retriever against three index backends: the canonical HF
dataset index, a custom HF index (in-memory and on-disk), and the legacy
faiss index format."""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch

import numpy as np
from datasets import Dataset

from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch

if is_faiss_available():
    import faiss


@require_faiss
class __UpperCAmelCase(SCREAMING_SNAKE_CASE__):
    """RAG retriever test suite.

    NOTE(review): this file is name-garbled. The base class
    ``SCREAMING_SNAKE_CASE__`` is undefined (the ``TestCase`` import suggests
    it), every method shares the name ``UpperCAmelCase_`` (later definitions
    shadow earlier ones), local assignments all target ``lowercase__`` while
    later statements read names like ``dataset``, ``retriever``, ``doc_dicts``
    that are no longer defined, and tuple targets such as
    ``lowercase__, lowercase__ : T = ...`` are invalid syntax. Comments below
    describe the evident intent; TODO confirm against the original test file.
    """

    def UpperCAmelCase_(self):
        """setUp: create a temp dir with a tiny DPR (wordpiece) vocab and a
        tiny BART (BPE) vocab+merges so real tokenizers can be built."""
        lowercase__ : Dict = tempfile.mkdtemp()
        lowercase__ : Optional[Any] = 8
        # DPR tok
        lowercase__ : Tuple = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        lowercase__ : Any = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(snake_case__, exist_ok=snake_case__)
        lowercase__ : Any = os.path.join(snake_case__, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        # BART tok
        lowercase__ : List[Any] = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        lowercase__ : Tuple = dict(zip(snake_case__, range(len(snake_case__))))
        lowercase__ : Any = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        lowercase__ : Tuple = {"unk_token": "<unk>"}
        lowercase__ : int = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(snake_case__, exist_ok=snake_case__)
        lowercase__ : List[str] = os.path.join(snake_case__, BART_VOCAB_FILES_NAMES["vocab_file"])
        lowercase__ : str = os.path.join(snake_case__, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(snake_case__) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(snake_case__))

    def UpperCAmelCase_(self):
        """Return the DPR question-encoder tokenizer built from the temp vocab."""
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def UpperCAmelCase_(self):
        """Return the DPR context-encoder tokenizer built from the temp vocab."""
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def UpperCAmelCase_(self):
        """Return the BART tokenizer built from the temp vocab/merges."""
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def UpperCAmelCase_(self):
        """tearDown: remove the temp dir."""
        shutil.rmtree(self.tmpdirname)

    def UpperCAmelCase_(self):
        """Return a two-row dummy dataset with a faiss inner-product index over
        its 'embeddings' column."""
        lowercase__ : Dict = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset

    def UpperCAmelCase_(self):
        """Return a RagRetriever over the canonical HF index, with the dataset
        load patched to the dummy dataset."""
        lowercase__ : Union[str, Any] = self.get_dummy_dataset()
        lowercase__ : Optional[Any] = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            lowercase__ : Tuple = dataset
            lowercase__ : Optional[int] = RagRetriever(
                snake_case__,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever

    def UpperCAmelCase_(self, snake_case__):
        """Return a RagRetriever over a custom HF index, either serialized to
        disk first (from_disk=True) or passed in memory."""
        lowercase__ : Dict = self.get_dummy_dataset()
        lowercase__ : Tuple = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            lowercase__ : Tuple = os.path.join(self.tmpdirname, "dataset")
            lowercase__ : Optional[Any] = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            lowercase__ : List[Any] = RagRetriever(
                snake_case__,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            lowercase__ : Optional[int] = RagRetriever(
                snake_case__,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, snake_case__),
            )
        return retriever

    def UpperCAmelCase_(self):
        """Return a RagRetriever over the legacy on-disk index format
        (hnswSQ8 index file + pickled passage dict)."""
        lowercase__ : List[str] = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        lowercase__ : Optional[int] = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))
        lowercase__ : int = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        lowercase__ : str = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(snake_case__, open(snake_case__, "wb"))
        lowercase__ : List[Any] = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        lowercase__ : Optional[Any] = RagRetriever(
            snake_case__, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever

    def UpperCAmelCase_(self):
        """Canonical index: retrieve() returns embeddings, doc dicts and ids
        ordered by inner-product similarity."""
        lowercase__ : Dict = 1
        lowercase__ : Optional[Any] = self.get_dummy_canonical_hf_index_retriever()
        lowercase__ : Union[str, Any] = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.floataa
        )
        lowercase__, lowercase__, lowercase__ : Optional[int] = retriever.retrieve(snake_case__, n_docs=snake_case__)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(snake_case__), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), snake_case__)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def UpperCAmelCase_(self):
        """Canonical index: save_pretrained / from_pretrained round-trip keeps
        the retriever usable."""
        lowercase__ : Optional[Any] = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                lowercase__ : Tuple = self.get_dummy_dataset()
                retriever.save_pretrained(snake_case__)
                lowercase__ : int = RagRetriever.from_pretrained(snake_case__)
                self.assertIsInstance(snake_case__, snake_case__)
                lowercase__ : Any = np.array(
                    [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.floataa
                )
                lowercase__ : Tuple = retriever.retrieve(snake_case__, n_docs=1)
                self.assertTrue(out is not None)

    def UpperCAmelCase_(self):
        """Custom HF index (in memory): retrieve() returns correctly ordered
        results."""
        lowercase__ : List[Any] = 1
        lowercase__ : Any = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__)
        lowercase__ : Union[str, Any] = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.floataa
        )
        lowercase__, lowercase__, lowercase__ : Any = retriever.retrieve(snake_case__, n_docs=snake_case__)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(snake_case__), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), snake_case__)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def UpperCAmelCase_(self):
        """Custom HF index (in memory): save/load round-trip keeps the
        retriever usable."""
        lowercase__ : Optional[Any] = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(snake_case__)
            lowercase__ : int = RagRetriever.from_pretrained(snake_case__)
            self.assertIsInstance(snake_case__, snake_case__)
            lowercase__ : Tuple = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.floataa
            )
            lowercase__ : str = retriever.retrieve(snake_case__, n_docs=1)
            self.assertTrue(out is not None)

    def UpperCAmelCase_(self):
        """Custom HF index (from disk): retrieve() returns correctly ordered
        results."""
        lowercase__ : Any = 1
        lowercase__ : str = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__)
        lowercase__ : List[str] = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.floataa
        )
        lowercase__, lowercase__, lowercase__ : Optional[int] = retriever.retrieve(snake_case__, n_docs=snake_case__)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(snake_case__), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), snake_case__)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def UpperCAmelCase_(self):
        """Custom HF index (from disk): save/load round-trip keeps the
        retriever usable."""
        lowercase__ : Tuple = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(snake_case__)
            lowercase__ : Optional[Any] = RagRetriever.from_pretrained(snake_case__)
            self.assertIsInstance(snake_case__, snake_case__)
            lowercase__ : int = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.floataa
            )
            lowercase__ : Union[str, Any] = retriever.retrieve(snake_case__, n_docs=1)
            self.assertTrue(out is not None)

    def UpperCAmelCase_(self):
        """Legacy index: retrieve() returns text/title doc dicts ordered by
        similarity."""
        lowercase__ : Dict = 1
        lowercase__ : int = self.get_dummy_legacy_index_retriever()
        lowercase__ : Optional[Any] = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.floataa
        )
        lowercase__, lowercase__, lowercase__ : Optional[Any] = retriever.retrieve(snake_case__, n_docs=snake_case__)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(snake_case__), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), snake_case__)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def UpperCAmelCase_(self):
        """Legacy index: save/load round-trip keeps the retriever usable."""
        lowercase__ : Optional[int] = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(snake_case__)
            lowercase__ : List[Any] = RagRetriever.from_pretrained(snake_case__)
            self.assertIsInstance(snake_case__, snake_case__)
            lowercase__ : str = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.floataa
            )
            lowercase__ : Tuple = retriever.retrieve(snake_case__, n_docs=1)
            self.assertTrue(out is not None)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def UpperCAmelCase_(self):
        """Calling the retriever directly returns numpy arrays by default and
        torch tensors when return_tensors='pt'."""
        import torch

        lowercase__ : str = 1
        lowercase__ : Union[str, Any] = self.get_dummy_canonical_hf_index_retriever()
        lowercase__ : str = [[5, 7], [10, 11]]
        lowercase__ : List[str] = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.floataa
        )
        lowercase__ : Dict = retriever(snake_case__, snake_case__, prefix=retriever.config.generator.prefix, n_docs=snake_case__)
        lowercase__, lowercase__, lowercase__ : Optional[int] = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(snake_case__, snake_case__)
        self.assertIsInstance(snake_case__, snake_case__)
        self.assertIsInstance(snake_case__, np.ndarray)
        lowercase__ : Any = retriever(
            snake_case__,
            snake_case__,
            prefix=retriever.config.generator.prefix,
            n_docs=snake_case__,
            return_tensors="pt",
        )
        lowercase__, lowercase__, lowercase__, lowercase__ : Tuple = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(snake_case__, torch.Tensor)
        self.assertIsInstance(snake_case__, torch.Tensor)
        self.assertIsInstance(snake_case__, torch.Tensor)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def UpperCAmelCase_(self):
        """With a context-encoder tokenizer set, the retriever output also
        contains the tokenized retrieved docs (6 keys total)."""
        lowercase__ : List[str] = self.get_dpr_ctx_encoder_tokenizer()
        lowercase__ : Dict = 1
        lowercase__ : Any = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__)
        retriever.set_ctx_encoder_tokenizer(snake_case__)
        lowercase__ : List[str] = [[5, 7], [10, 11]]
        lowercase__ : Any = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.floataa
        )
        lowercase__ : List[Any] = retriever(snake_case__, snake_case__, prefix=retriever.config.generator.prefix, n_docs=snake_case__)
        self.assertEqual(
            len(snake_case__), 6
        )  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), snake_case__
        )  # check for doc token related keys in dictionary.
85
1
"""simple docstring""" a : Dict = [ 999, 800, 799, 600, 599, 500, 400, 399, 377, 355, 333, 311, 288, 266, 244, 222, 200, 199, 177, 155, 133, 111, 88, 66, 44, 22, 0, ] a : str = [ 999, 976, 952, 928, 905, 882, 858, 857, 810, 762, 715, 714, 572, 429, 428, 286, 285, 238, 190, 143, 142, 118, 95, 71, 47, 24, 0, ] a : List[Any] = [ 999, 988, 977, 966, 955, 944, 933, 922, 911, 900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399, 350, 300, 299, 266, 233, 200, 199, 179, 159, 140, 120, 100, 99, 88, 77, 66, 55, 44, 33, 22, 11, 0, ] a : List[str] = [ 999, 995, 992, 989, 985, 981, 978, 975, 971, 967, 964, 961, 957, 956, 951, 947, 942, 937, 933, 928, 923, 919, 914, 913, 908, 903, 897, 892, 887, 881, 876, 871, 870, 864, 858, 852, 846, 840, 834, 828, 827, 820, 813, 806, 799, 792, 785, 784, 777, 770, 763, 756, 749, 742, 741, 733, 724, 716, 707, 699, 698, 688, 677, 666, 656, 655, 645, 634, 623, 613, 612, 598, 584, 570, 569, 555, 541, 527, 526, 505, 484, 483, 462, 440, 439, 396, 395, 352, 351, 308, 307, 264, 263, 220, 219, 176, 132, 88, 44, 0, ] a : int = [ 999, 997, 995, 992, 990, 988, 986, 984, 981, 979, 977, 975, 972, 970, 968, 966, 964, 961, 959, 957, 956, 954, 951, 949, 946, 944, 941, 939, 936, 934, 931, 929, 926, 924, 921, 919, 916, 914, 913, 910, 907, 905, 902, 899, 896, 893, 891, 888, 885, 882, 879, 877, 874, 871, 870, 867, 864, 861, 858, 855, 852, 849, 846, 843, 840, 837, 834, 831, 828, 827, 824, 821, 817, 814, 811, 808, 804, 801, 798, 795, 791, 788, 785, 784, 780, 777, 774, 770, 766, 763, 760, 756, 752, 749, 746, 742, 741, 737, 733, 730, 726, 722, 718, 714, 710, 707, 703, 699, 698, 694, 690, 685, 681, 677, 673, 669, 664, 660, 656, 655, 650, 646, 641, 636, 632, 627, 622, 618, 613, 612, 607, 602, 596, 591, 586, 580, 575, 570, 569, 563, 557, 551, 545, 539, 533, 527, 526, 519, 512, 505, 498, 491, 484, 483, 474, 466, 457, 449, 440, 439, 428, 418, 407, 396, 395, 381, 366, 352, 351, 330, 308, 307, 286, 264, 263, 242, 220, 219, 176, 175, 
132, 131, 88, 44, 0, ] a : Dict = [ 999, 991, 982, 974, 966, 958, 950, 941, 933, 925, 916, 908, 900, 899, 874, 850, 825, 800, 799, 700, 600, 500, 400, 300, 200, 100, 0, ] a : Optional[int] = [ 999, 992, 985, 978, 971, 964, 957, 949, 942, 935, 928, 921, 914, 907, 900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399, 300, 299, 200, 199, 100, 99, 0, ] a : Dict = [ 999, 996, 992, 989, 985, 982, 979, 975, 972, 968, 965, 961, 958, 955, 951, 948, 944, 941, 938, 934, 931, 927, 924, 920, 917, 914, 910, 907, 903, 900, 899, 891, 884, 876, 869, 861, 853, 846, 838, 830, 823, 815, 808, 800, 799, 788, 777, 766, 755, 744, 733, 722, 711, 700, 699, 688, 677, 666, 655, 644, 633, 622, 611, 600, 599, 585, 571, 557, 542, 528, 514, 500, 499, 485, 471, 457, 442, 428, 414, 400, 399, 379, 359, 340, 320, 300, 299, 279, 259, 240, 220, 200, 199, 166, 133, 100, 99, 66, 33, 0, ]
85
"""simple docstring""" from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = ["image_processor", "tokenizer"] __lowerCamelCase = "AutoImageProcessor" __lowerCamelCase = "AutoTokenizer" def __init__( self , snake_case__ , snake_case__ ): '''simple docstring''' super().__init__(snake_case__ , snake_case__ ) lowercase__ : List[Any]= self.image_processor def __call__( self , snake_case__=None , snake_case__=None , snake_case__=None , **snake_case__ ): '''simple docstring''' if text is None and images is None: raise ValueError("You have to specify either text or images. Both cannot be none." ) if text is not None: lowercase__ : Tuple= self.tokenizer(snake_case__ , return_tensors=snake_case__ , **snake_case__ ) if images is not None: lowercase__ : str= self.image_processor(snake_case__ , return_tensors=snake_case__ , **snake_case__ ) if text is not None and images is not None: lowercase__ : Any= image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**snake_case__ ) , tensor_type=snake_case__ ) def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ ) def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' return self.tokenizer.decode(*snake_case__ , **snake_case__ ) @property def UpperCAmelCase_ ( self ): '''simple docstring''' return ["input_ids", "attention_mask", "pixel_values"]
85
1
"""simple docstring""" import numpy as np def lowercase__(A ) ->np.ndarray: """simple docstring""" return 1 / (1 + np.exp(-vector )) def lowercase__(A ) ->np.ndarray: """simple docstring""" return vector * sigmoid(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": import doctest doctest.testmod()
700
"""simple docstring""" a : List[Any] = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/""" def lowercase__(A ) ->bytes: """simple docstring""" if not isinstance(A , A ): lowercase__ : Union[str, Any]= f'''a bytes-like object is required, not \'{data.__class__.__name__}\'''' raise TypeError(A ) lowercase__ : str= "".join(bin(A )[2:].zfill(8 ) for byte in data ) lowercase__ : Tuple= len(A ) % 6 != 0 if padding_needed: # The padding that will be added later lowercase__ : Union[str, Any]= b"=" * ((6 - len(A ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(A ) % 6) else: lowercase__ : str= b"" # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(A ) , 6 ) ).encode() + padding ) def lowercase__(A ) ->bytes: """simple docstring""" if not isinstance(A , A ) and not isinstance(A , A ): lowercase__ : str= ( "argument should be a bytes-like object or ASCII string, " f'''not \'{encoded_data.__class__.__name__}\'''' ) raise TypeError(A ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(A , A ): try: lowercase__ : Optional[Any]= encoded_data.decode("utf-8" ) except UnicodeDecodeError: raise ValueError("base64 encoded data should only contain ASCII characters" ) lowercase__ : List[Any]= encoded_data.count("=" ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." 
# Check the padding assert len(A ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one lowercase__ : str= encoded_data[:-padding] lowercase__ : Tuple= "".join( bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: lowercase__ : Tuple= "".join( bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data ) lowercase__ : Any= [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(A ) , 8 ) ] return bytes(A ) if __name__ == "__main__": import doctest doctest.testmod()
85
0
"""simple docstring""" from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS a : Union[str, Any] = logging.get_logger(__name__) a : Dict = { """linear""": get_linear_schedule_with_warmup, """cosine""": get_cosine_schedule_with_warmup, """cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup, """polynomial""": get_polynomial_decay_schedule_with_warmup, """constant""": get_constant_schedule, """constant_w_warmup""": get_constant_schedule_with_warmup, } class __UpperCAmelCase( _snake_case ): """simple docstring""" def __init__( self , snake_case__=None , snake_case__=None , *snake_case__ , **snake_case__ ): '''simple docstring''' super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ ) if config is None: assert isinstance(self.model , lowerCAmelCase__ ), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" F''' {self.model.__class__}''' ) lowercase__ : Optional[Any]= self.model.config else: lowercase__ : Tuple= config lowercase__ : List[str]= data_args lowercase__ : Union[str, Any]= self.config.tgt_vocab_size if isinstance(self.config , lowerCAmelCase__ ) else self.config.vocab_size if self.args.label_smoothing != 0 or 
(self.data_args is not None and self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( F'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for''' " padding.." ) if self.args.label_smoothing == 0: lowercase__ : Union[str, Any]= torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id ) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss lowercase__ : int= label_smoothed_nll_loss def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' if self.optimizer is None: lowercase__ : Dict= ["bias", "LayerNorm.weight"] lowercase__ : Dict= [ { "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )], "weight_decay": self.args.weight_decay, }, { "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )], "weight_decay": 0.0, }, ] lowercase__ : Optional[Any]= Adafactor if self.args.adafactor else AdamW if self.args.adafactor: lowercase__ : int= Adafactor lowercase__ : Union[str, Any]= {"scale_parameter": False, "relative_step": False} else: lowercase__ : str= AdamW lowercase__ : str= { "betas": (self.args.adam_betaa, self.args.adam_betaa), "eps": self.args.adam_epsilon, } lowercase__ : Dict= self.args.learning_rate if self.sharded_ddp: lowercase__ : Dict= OSS( params=lowerCAmelCase__ , optim=lowerCAmelCase__ , **lowerCAmelCase__ , ) else: lowercase__ : Dict= optimizer_cls(lowerCAmelCase__ , **lowerCAmelCase__ ) if self.lr_scheduler is None: lowercase__ : List[Any]= self._get_lr_scheduler(lowerCAmelCase__ ) else: # ignoring --lr_scheduler logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored." 
) def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' lowercase__ : Optional[Any]= arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": lowercase__ : Optional[Any]= schedule_func(self.optimizer ) elif self.args.lr_scheduler == "constant_w_warmup": lowercase__ : str= schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps ) else: lowercase__ : Union[str, Any]= schedule_func( self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=lowerCAmelCase__ ) return scheduler def UpperCAmelCase_ ( self ): '''simple docstring''' if isinstance(self.train_dataset , torch.utils.data.IterableDataset ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset ) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , ) return ( RandomSampler(self.train_dataset ) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset ) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token lowercase__ : Any= model(**lowerCAmelCase__ , use_cache=lowerCAmelCase__ )[0] lowercase__ : int= self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) ) else: # compute usual loss via models lowercase__, lowercase__ : Optional[int]= model(**lowerCAmelCase__ , labels=lowerCAmelCase__ , use_cache=lowerCAmelCase__ )[:2] else: # compute label smoothed loss lowercase__ : Dict= model(**lowerCAmelCase__ , use_cache=lowerCAmelCase__ )[0] lowercase__ : Tuple= torch.nn.functional.log_softmax(lowerCAmelCase__ , dim=-1 ) lowercase__, lowercase__ : Union[str, Any]= self.loss_fn(lowerCAmelCase__ , lowerCAmelCase__ , self.args.label_smoothing , 
ignore_index=self.config.pad_token_id ) return loss, logits def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ): '''simple docstring''' lowercase__ : Optional[int]= inputs.pop("labels" ) lowercase__, lowercase__ : Dict= self._compute_loss(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) return loss def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , ): '''simple docstring''' lowercase__ : List[str]= self._prepare_inputs(lowerCAmelCase__ ) lowercase__ : List[str]= { "max_length": self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: lowercase__ : List[Any]= self.model.generate( inputs["input_ids"] , attention_mask=inputs["attention_mask"] , **lowerCAmelCase__ , ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: lowercase__ : str= self._pad_tensors_to_max_len(lowerCAmelCase__ , gen_kwargs["max_length"] ) lowercase__ : Optional[Any]= inputs.pop("labels" ) with torch.no_grad(): # compute loss on predict data lowercase__, lowercase__ : List[str]= self._compute_loss(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) lowercase__ : Any= loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) lowercase__ : Dict= generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: lowercase__ : str= self._pad_tensors_to_max_len(lowerCAmelCase__ , gen_kwargs["max_length"] ) return (loss, logits, labels) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ): '''simple docstring''' # If PAD token is not defined at least EOS token has to be defined lowercase__ : int= self.config.pad_token_id if self.config.pad_token_id is not None else 
self.config.eos_token_id if pad_token_id is None: raise ValueError( "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be" F''' padded to `max_length`={max_length}''' ) lowercase__ : Optional[Any]= pad_token_id * torch.ones( (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device ) lowercase__ : List[Any]= tensor return padded_tensor
701
"""simple docstring""" from __future__ import annotations def lowercase__(A ) ->list[int]: # This function is recursive """simple docstring""" lowercase__ : int= len(A ) # If the array contains only one element, we return it (it's the stop condition of # recursion) if array_length <= 1: return array # Else lowercase__ : str= array[0] lowercase__ : Optional[Any]= False lowercase__ : Any= 1 lowercase__ : list[int]= [] while not is_found and i < array_length: if array[i] < pivot: lowercase__ : Union[str, Any]= True lowercase__ : List[str]= [element for element in array[i:] if element >= array[i]] lowercase__ : Union[str, Any]= longest_subsequence(A ) if len(A ) > len(A ): lowercase__ : List[str]= temp_array else: i += 1 lowercase__ : List[str]= [element for element in array[1:] if element >= pivot] lowercase__ : List[str]= [pivot, *longest_subsequence(A )] if len(A ) > len(A ): return temp_array else: return longest_subseq if __name__ == "__main__": import doctest doctest.testmod()
85
0
"""simple docstring""" import warnings from ...utils import logging from .image_processing_videomae import VideoMAEImageProcessor a : List[Any] = logging.get_logger(__name__) class __UpperCAmelCase( snake_case__ ): """simple docstring""" def __init__( self , *snake_case__ , **snake_case__ ): '''simple docstring''' warnings.warn( "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use VideoMAEImageProcessor instead." , UpperCAmelCase_ , ) super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
702
"""simple docstring""" import argparse from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline if __name__ == "__main__": a : int = argparse.ArgumentParser() parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument( """--txt2img_unclip""", default="""kakaobrain/karlo-v1-alpha""", type=str, required=False, help="""The pretrained txt2img unclip.""", ) a : List[str] = parser.parse_args() a : List[str] = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip) a : Optional[Any] = CLIPImageProcessor() a : List[str] = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""") a : Tuple = UnCLIPImageVariationPipeline( decoder=txtaimg.decoder, text_encoder=txtaimg.text_encoder, tokenizer=txtaimg.tokenizer, text_proj=txtaimg.text_proj, feature_extractor=feature_extractor, image_encoder=image_encoder, super_res_first=txtaimg.super_res_first, super_res_last=txtaimg.super_res_last, decoder_scheduler=txtaimg.decoder_scheduler, super_res_scheduler=txtaimg.super_res_scheduler, ) imgaimg.save_pretrained(args.dump_path)
85
0
"""simple docstring""" import argparse import os from pathlib import Path from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params a : List[str] = [ # replace left string with right string to get the relevant state_dict key (identical state dict to bart) ["memory_attention", "encoder_attn"], ["attention", "attn"], ["/", "."], [".LayerNorm.gamma", "_layer_norm.weight"], [".LayerNorm.beta", "_layer_norm.bias"], ["r.layer_", "r.layers."], ["output_proj", "out_proj"], ["ffn.dense_1.", "fc2."], ["ffn.dense.", "fc1."], ["ffn_layer_norm", "final_layer_norm"], ["kernel", "weight"], ["encoder_layer_norm.", "encoder.layer_norm."], ["decoder_layer_norm.", "decoder.layer_norm."], ["embeddings.weights", "shared.weight"], ] def lowercase__(A ) ->List[str]: """simple docstring""" for pegasus_name, hf_name in PATTERNS: lowercase__ : Any= k.replace(A , A ) return k def lowercase__(A , A ) ->PegasusForConditionalGeneration: """simple docstring""" lowercase__ : Tuple= DEFAULTS.copy() cfg_kwargs.update(A ) lowercase__ : Dict= PegasusConfig(**A ) lowercase__ : Optional[Any]= PegasusForConditionalGeneration(A ) lowercase__ : Optional[Any]= torch_model.model.state_dict() lowercase__ : int= {} for k, v in tf_weights.items(): lowercase__ : str= rename_state_dict_key(A ) if new_k not in sd: raise ValueError(f'''could not find new key {new_k} in state dict. 
(converted from {k})''' ) if "dense" in k or "proj" in new_k: lowercase__ : int= v.T lowercase__ : List[str]= torch.tensor(A , dtype=sd[new_k].dtype ) assert v.shape == sd[new_k].shape, f'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}''' # make sure embedding.padding_idx is respected lowercase__ : Optional[Any]= torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1] ) lowercase__ : int= mapping["shared.weight"] lowercase__ : str= mapping["shared.weight"] lowercase__ : int= {k: torch.zeros_like(A ) for k, v in sd.items() if k.endswith("bias" ) and k not in mapping} mapping.update(**A ) lowercase__, lowercase__ : Any= torch_model.model.load_state_dict(A , strict=A ) lowercase__ : Union[str, Any]= [ k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"] ] assert unexpected_missing == [], f'''no matches found for the following torch keys {unexpected_missing}''' assert extra == [], f'''no matches found for the following tf keys {extra}''' return torch_model def lowercase__(A="./ckpt/aeslc/model.ckpt-32000" ) ->Dict: """simple docstring""" lowercase__ : Any= tf.train.list_variables(A ) lowercase__ : Optional[int]= {} lowercase__ : Dict= ["Adafactor", "global_step"] for name, shape in tqdm(A , desc="converting tf checkpoint to dict" ): lowercase__ : int= any(pat in name for pat in ignore_name ) if skip_key: continue lowercase__ : List[Any]= tf.train.load_variable(A , A ) lowercase__ : Union[str, Any]= array return tf_weights def lowercase__(A , A ) ->Union[str, Any]: """simple docstring""" lowercase__ : Tuple= Path(A ).parent.name lowercase__ : Union[str, Any]= task_specific_params[f'''summarization_{dataset}''']["max_position_embeddings"] lowercase__ : Any= PegasusTokenizer.from_pretrained("sshleifer/pegasus" , model_max_length=A ) assert tok.model_max_length == desired_max_model_length tok.save_pretrained(A ) # convert model lowercase__ : Optional[int]= get_tf_weights_as_numpy(A ) lowercase__ : Union[str, Any]= 
task_specific_params[f'''summarization_{dataset}'''] if dataset == "large": lowercase__ : Union[str, Any]= task_specific_params lowercase__ : Any= convert_pegasus(A , A ) torch_model.save_pretrained(A ) lowercase__ : Any= torch_model.state_dict() sd.pop("model.decoder.embed_positions.weight" ) sd.pop("model.encoder.embed_positions.weight" ) torch.save(A , Path(A ) / "pytorch_model.bin" ) if __name__ == "__main__": a : str = argparse.ArgumentParser() # Required parameters parser.add_argument("""tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""") parser.add_argument("""save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""") a : Tuple = parser.parse_args() if args.save_dir is None: a : Optional[int] = Path(args.tf_ckpt_path).parent.name a : Union[str, Any] = os.path.join("""pegasus""", dataset) convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
703
"""simple docstring""" import argparse import os from . import ( ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BART_PRETRAINED_MODEL_ARCHIVE_LIST, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, BartConfig, BertConfig, CamembertConfig, CTRLConfig, DistilBertConfig, DPRConfig, ElectraConfig, FlaubertConfig, GPTaConfig, LayoutLMConfig, LxmertConfig, OpenAIGPTConfig, RobertaConfig, TaConfig, TFAlbertForPreTraining, TFBartForConditionalGeneration, TFBartForSequenceClassification, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFCamembertForMaskedLM, TFCTRLLMHeadModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, TFElectraForPreTraining, TFFlaubertWithLMHeadModel, TFGPTaLMHeadModel, TFLayoutLMForMaskedLM, TFLxmertForPreTraining, TFLxmertVisualFeatureEncoder, TFOpenAIGPTLMHeadModel, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForSequenceClassification, TFTaForConditionalGeneration, TFTransfoXLLMHeadModel, TFWavaVecaModel, TFXLMRobertaForMaskedLM, TFXLMWithLMHeadModel, TFXLNetLMHeadModel, TransfoXLConfig, WavaVecaConfig, WavaVecaModel, XLMConfig, XLMRobertaConfig, XLNetConfig, is_torch_available, 
load_pytorch_checkpoint_in_tfa_model, ) from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging if is_torch_available(): import numpy as np import torch from . import ( AlbertForPreTraining, BartForConditionalGeneration, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, CamembertForMaskedLM, CTRLLMHeadModel, DistilBertForMaskedLM, DistilBertForQuestionAnswering, DPRContextEncoder, DPRQuestionEncoder, DPRReader, ElectraForPreTraining, FlaubertWithLMHeadModel, GPTaLMHeadModel, LayoutLMForMaskedLM, LxmertForPreTraining, LxmertVisualFeatureEncoder, OpenAIGPTLMHeadModel, RobertaForMaskedLM, RobertaForSequenceClassification, TaForConditionalGeneration, TransfoXLLMHeadModel, XLMRobertaForMaskedLM, XLMWithLMHeadModel, XLNetLMHeadModel, ) logging.set_verbosity_info() a : Optional[Any] = { """bart""": ( BartConfig, TFBartForConditionalGeneration, TFBartForSequenceClassification, BartForConditionalGeneration, BART_PRETRAINED_MODEL_ARCHIVE_LIST, ), """bert""": ( BertConfig, TFBertForPreTraining, BertForPreTraining, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-large-uncased-whole-word-masking-finetuned-squad""": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-large-cased-whole-word-masking-finetuned-squad""": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-base-cased-finetuned-mrpc""": ( BertConfig, TFBertForSequenceClassification, BertForSequenceClassification, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """dpr""": ( DPRConfig, TFDPRQuestionEncoder, TFDPRContextEncoder, TFDPRReader, DPRQuestionEncoder, DPRContextEncoder, DPRReader, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ), """gpt2""": ( GPTaConfig, TFGPTaLMHeadModel, GPTaLMHeadModel, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlnet""": ( XLNetConfig, 
TFXLNetLMHeadModel, XLNetLMHeadModel, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlm""": ( XLMConfig, TFXLMWithLMHeadModel, XLMWithLMHeadModel, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlm-roberta""": ( XLMRobertaConfig, TFXLMRobertaForMaskedLM, XLMRobertaForMaskedLM, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """transfo-xl""": ( TransfoXLConfig, TFTransfoXLLMHeadModel, TransfoXLLMHeadModel, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """openai-gpt""": ( OpenAIGPTConfig, TFOpenAIGPTLMHeadModel, OpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """roberta""": ( RobertaConfig, TFRobertaForCausalLM, TFRobertaForMaskedLM, RobertaForMaskedLM, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """layoutlm""": ( LayoutLMConfig, TFLayoutLMForMaskedLM, LayoutLMForMaskedLM, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, ), """roberta-large-mnli""": ( RobertaConfig, TFRobertaForSequenceClassification, RobertaForSequenceClassification, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """camembert""": ( CamembertConfig, TFCamembertForMaskedLM, CamembertForMaskedLM, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """flaubert""": ( FlaubertConfig, TFFlaubertWithLMHeadModel, FlaubertWithLMHeadModel, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """distilbert""": ( DistilBertConfig, TFDistilBertForMaskedLM, DistilBertForMaskedLM, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """distilbert-base-distilled-squad""": ( DistilBertConfig, TFDistilBertForQuestionAnswering, DistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """lxmert""": ( LxmertConfig, TFLxmertForPreTraining, LxmertForPreTraining, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """lxmert-visual-feature-encoder""": ( LxmertConfig, TFLxmertVisualFeatureEncoder, LxmertVisualFeatureEncoder, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """ctrl""": ( CTRLConfig, TFCTRLLMHeadModel, CTRLLMHeadModel, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """albert""": ( AlbertConfig, TFAlbertForPreTraining, AlbertForPreTraining, 
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """t5""": ( TaConfig, TFTaForConditionalGeneration, TaForConditionalGeneration, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """electra""": ( ElectraConfig, TFElectraForPreTraining, ElectraForPreTraining, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """wav2vec2""": ( WavaVecaConfig, TFWavaVecaModel, WavaVecaModel, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), } def lowercase__(A , A , A , A , A=False , A=True ) ->Union[str, Any]: """simple docstring""" if model_type not in MODEL_CLASSES: raise ValueError(f'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' ) lowercase__, lowercase__, lowercase__, lowercase__ : List[Any]= MODEL_CLASSES[model_type] # Initialise TF model if config_file in aws_config_map: lowercase__ : List[str]= cached_file(A , A , force_download=not use_cached_models ) lowercase__ : List[Any]= config_class.from_json_file(A ) lowercase__ : Any= True lowercase__ : List[str]= True print(f'''Building TensorFlow model from configuration: {config}''' ) lowercase__ : Optional[int]= model_class(A ) # Load weights from tf checkpoint if pytorch_checkpoint_path in aws_config_map.keys(): lowercase__ : List[str]= cached_file( A , A , force_download=not use_cached_models ) # Load PyTorch checkpoint in tf2 model: lowercase__ : Union[str, Any]= load_pytorch_checkpoint_in_tfa_model(A , A ) if compare_with_pt_model: lowercase__ : Any= tf_model(tf_model.dummy_inputs , training=A ) # build the network lowercase__ : Optional[Any]= torch.load(A , map_location="cpu" ) lowercase__ : Union[str, Any]= pt_model_class.from_pretrained( pretrained_model_name_or_path=A , config=A , state_dict=A ) with torch.no_grad(): lowercase__ : str= pt_model(**pt_model.dummy_inputs ) lowercase__ : Tuple= pto[0].numpy() lowercase__ : List[Any]= tfo[0].numpy() lowercase__ : Any= np.amax(np.abs(np_pt - np_tf ) ) print(f'''Max absolute difference between models outputs {diff}''' ) assert diff <= 2e-2, f'''Error, model absolute difference is 
>2e-2: {diff}''' # Save pytorch-model print(f'''Save TensorFlow model to {tf_dump_path}''' ) tf_model.save_weights(A , save_format="h5" ) def lowercase__(A , A , A=None , A=None , A=False , A=False , A=False , A=False , ) ->List[Any]: """simple docstring""" if args_model_type is None: lowercase__ : Tuple= list(MODEL_CLASSES.keys() ) else: lowercase__ : Optional[int]= [args_model_type] for j, model_type in enumerate(A , start=1 ): print("=" * 100 ) print(f''' Converting model type {j}/{len(A )}: {model_type}''' ) print("=" * 100 ) if model_type not in MODEL_CLASSES: raise ValueError(f'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' ) lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ : Optional[int]= MODEL_CLASSES[model_type] if model_shortcut_names_or_path is None: lowercase__ : int= list(aws_model_maps.keys() ) if config_shortcut_names_or_path is None: lowercase__ : Any= model_shortcut_names_or_path for i, (model_shortcut_name, config_shortcut_name) in enumerate( zip(A , A ) , start=1 ): print("-" * 100 ) if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name: if not only_convert_finetuned_models: print(f''' Skipping finetuned checkpoint {model_shortcut_name}''' ) continue lowercase__ : Any= model_shortcut_name elif only_convert_finetuned_models: print(f''' Skipping not finetuned checkpoint {model_shortcut_name}''' ) continue print( f''' Converting checkpoint {i}/{len(A )}: {model_shortcut_name} - model_type {model_type}''' ) print("-" * 100 ) if config_shortcut_name in aws_config_map: lowercase__ : List[str]= cached_file(A , A , force_download=not use_cached_models ) else: lowercase__ : Union[str, Any]= config_shortcut_name if model_shortcut_name in aws_model_maps: lowercase__ : str= cached_file(A , A , force_download=not use_cached_models ) else: lowercase__ : Any= model_shortcut_name if os.path.isfile(A ): lowercase__ : Dict= "converted_model" 
convert_pt_checkpoint_to_tf( model_type=A , pytorch_checkpoint_path=A , config_file=A , tf_dump_path=os.path.join(A , model_shortcut_name + "-tf_model.h5" ) , compare_with_pt_model=A , ) if remove_cached_files: os.remove(A ) os.remove(A ) if __name__ == "__main__": a : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file.""" ) parser.add_argument( """--model_type""", default=None, type=str, help=( F"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """ """convert all the models from AWS.""" ), ) parser.add_argument( """--pytorch_checkpoint_path""", default=None, type=str, help=( """Path to the PyTorch checkpoint path or shortcut name to download from AWS. """ """If not given, will download and convert all the checkpoints from AWS.""" ), ) parser.add_argument( """--config_file""", default=None, type=str, help=( """The config json file corresponding to the pre-trained model. \n""" """This specifies the model architecture. 
If not given and """ """--pytorch_checkpoint_path is not given or is a shortcut name """ """use the configuration associated to the shortcut name on the AWS""" ), ) parser.add_argument( """--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions.""" ) parser.add_argument( """--use_cached_models""", action="""store_true""", help="""Use cached models if possible instead of updating to latest checkpoint versions.""", ) parser.add_argument( """--remove_cached_files""", action="""store_true""", help="""Remove pytorch models after conversion (save memory when converting in batches).""", ) parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""") a : List[str] = parser.parse_args() # if args.pytorch_checkpoint_path is not None: # convert_pt_checkpoint_to_tf(args.model_type.lower(), # args.pytorch_checkpoint_path, # args.config_file if args.config_file is not None else args.pytorch_checkpoint_path, # args.tf_dump_path, # compare_with_pt_model=args.compare_with_pt_model, # use_cached_models=args.use_cached_models) # else: convert_all_pt_checkpoints_to_tf( args.model_type.lower() if args.model_type is not None else None, args.tf_dump_path, model_shortcut_names_or_path=[args.pytorch_checkpoint_path] if args.pytorch_checkpoint_path is not None else None, config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None, compare_with_pt_model=args.compare_with_pt_model, use_cached_models=args.use_cached_models, remove_cached_files=args.remove_cached_files, only_convert_finetuned_models=args.only_convert_finetuned_models, )
85
0
"""simple docstring""" from typing import Any class __UpperCAmelCase: def __init__( self , snake_case__ ): '''simple docstring''' lowercase__ : Optional[int]= data lowercase__ : Union[str, Any]= None def __repr__( self ): '''simple docstring''' return F'''Node({self.data})''' class __UpperCAmelCase: def __init__( self ): '''simple docstring''' lowercase__ : str= None def __iter__( self ): '''simple docstring''' lowercase__ : Dict= self.head while node: yield node.data lowercase__ : str= node.next def __len__( self ): '''simple docstring''' return sum(1 for _ in self ) def __repr__( self ): '''simple docstring''' return "->".join([str(UpperCamelCase_ ) for item in self] ) def __getitem__( self , snake_case__ ): '''simple docstring''' if not 0 <= index < len(self ): raise ValueError("list index out of range." ) for i, node in enumerate(self ): if i == index: return node return None def __setitem__( self , snake_case__ , snake_case__ ): '''simple docstring''' if not 0 <= index < len(self ): raise ValueError("list index out of range." 
) lowercase__ : Dict= self.head for _ in range(UpperCamelCase_ ): lowercase__ : List[str]= current.next lowercase__ : Optional[int]= data def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' self.insert_nth(len(self ) , UpperCamelCase_ ) def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' self.insert_nth(0 , UpperCamelCase_ ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ): '''simple docstring''' if not 0 <= index <= len(self ): raise IndexError("list index out of range" ) lowercase__ : List[str]= Node(UpperCamelCase_ ) if self.head is None: lowercase__ : List[Any]= new_node elif index == 0: lowercase__ : Dict= self.head # link new_node to head lowercase__ : List[Any]= new_node else: lowercase__ : Tuple= self.head for _ in range(index - 1 ): lowercase__ : Tuple= temp.next lowercase__ : Union[str, Any]= temp.next lowercase__ : List[Any]= new_node def UpperCAmelCase_ ( self ): # print every node data '''simple docstring''' print(self ) def UpperCAmelCase_ ( self ): '''simple docstring''' return self.delete_nth(0 ) def UpperCAmelCase_ ( self ): # delete from tail '''simple docstring''' return self.delete_nth(len(self ) - 1 ) def UpperCAmelCase_ ( self , snake_case__ = 0 ): '''simple docstring''' if not 0 <= index <= len(self ) - 1: # test if index is valid raise IndexError("List index out of range." ) lowercase__ : List[Any]= self.head # default first node if index == 0: lowercase__ : Union[str, Any]= self.head.next else: lowercase__ : Dict= self.head for _ in range(index - 1 ): lowercase__ : str= temp.next lowercase__ : Any= temp.next lowercase__ : Dict= temp.next.next return delete_node.data def UpperCAmelCase_ ( self ): '''simple docstring''' return self.head is None def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[str]= None lowercase__ : Union[str, Any]= self.head while current: # Store the current node's next node. 
lowercase__ : List[Any]= current.next # Make the current node's next point backwards lowercase__ : List[Any]= prev # Make the previous node be the current node lowercase__ : Optional[Any]= current # Make the current node the next node (to progress iteration) lowercase__ : Optional[Any]= next_node # Return prev in order to put the head at the end lowercase__ : Any= prev def lowercase__() ->None: """simple docstring""" lowercase__ : Optional[Any]= LinkedList() assert linked_list.is_empty() is True assert str(_lowercase ) == "" try: linked_list.delete_head() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. for i in range(10 ): assert len(_lowercase ) == i linked_list.insert_nth(_lowercase , i + 1 ) assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(1 , 11 ) ) linked_list.insert_head(0 ) linked_list.insert_tail(11 ) assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(0 , 12 ) ) assert linked_list.delete_head() == 0 assert linked_list.delete_nth(9 ) == 10 assert linked_list.delete_tail() == 11 assert len(_lowercase ) == 9 assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(1 , 10 ) ) assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True for i in range(0 , 9 ): lowercase__ : int= -i assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True linked_list.reverse() assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(-8 , 1 ) ) def lowercase__() ->None: """simple docstring""" lowercase__ : List[Any]= [ -9, 100, Node(77_345_112 ), 'dlrow olleH', 7, 5_555, 0, -192.55_555, 'Hello, world!', 77.9, Node(10 ), None, None, 12.20, ] lowercase__ : int= LinkedList() for i in test_input: linked_list.insert_tail(_lowercase ) # Check if it's empty or not assert linked_list.is_empty() is False assert ( str(_lowercase ) == 
"-9->100->Node(77345112)->dlrow olleH->7->5555->0->" "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the head lowercase__ : List[str]= linked_list.delete_head() assert result == -9 assert ( str(_lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the tail lowercase__ : int= linked_list.delete_tail() assert result == 12.2 assert ( str(_lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None" ) # Delete a node in specific location in linked list lowercase__ : Dict= linked_list.delete_nth(10 ) assert result is None assert ( str(_lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None" ) # Add a Node instance to its head linked_list.insert_head(Node("Hello again, world!" ) ) assert ( str(_lowercase ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None" ) # Add None to its tail linked_list.insert_tail(_lowercase ) assert ( str(_lowercase ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None" ) # Reverse the linked list linked_list.reverse() assert ( str(_lowercase ) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->" "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)" ) def lowercase__() ->Dict: """simple docstring""" from doctest import testmod testmod() lowercase__ : Optional[int]= LinkedList() linked_list.insert_head(input("Inserting 1st at head " ).strip() ) linked_list.insert_head(input("Inserting 2nd at head " ).strip() ) print("\nPrint list:" ) linked_list.print_list() linked_list.insert_tail(input("\nInserting 1st at tail " ).strip() ) linked_list.insert_tail(input("Inserting 2nd at tail " ).strip() ) print("\nPrint list:" ) linked_list.print_list() 
print("\nDelete head" ) linked_list.delete_head() print("Delete tail" ) linked_list.delete_tail() print("\nPrint list:" ) linked_list.print_list() print("\nReverse linked list" ) linked_list.reverse() print("\nPrint list:" ) linked_list.print_list() print("\nString representation of linked list:" ) print(_lowercase ) print("\nReading/changing Node data using indexing:" ) print(f'''Element at Position 1: {linked_list[1]}''' ) lowercase__ : List[Any]= input("Enter New Value: " ).strip() print("New list:" ) print(_lowercase ) print(f'''length of linked_list is : {len(_lowercase )}''' ) if __name__ == "__main__": main()
704
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import _LazyModule a : List[str] = {"""processing_wav2vec2_with_lm""": ["""Wav2Vec2ProcessorWithLM"""]} if TYPE_CHECKING: from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM else: import sys a : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
85
0
import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaPriorEmbaEmbPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __UpperCAmelCase( lowercase__ , unittest.TestCase ): """simple docstring""" __lowerCamelCase = KandinskyVaaControlnetImgaImgPipeline __lowerCamelCase = ["image_embeds", "negative_image_embeds", "image", "hint"] __lowerCamelCase = ["image_embeds", "negative_image_embeds", "image", "hint"] __lowerCamelCase = [ "generator", "height", "width", "strength", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] __lowerCamelCase = False @property def UpperCAmelCase_ ( self ): '''simple docstring''' return 32 @property def UpperCAmelCase_ ( self ): '''simple docstring''' return 32 @property def UpperCAmelCase_ ( self ): '''simple docstring''' return self.time_input_dim @property def UpperCAmelCase_ ( self ): '''simple docstring''' return self.time_input_dim * 4 @property def UpperCAmelCase_ ( self ): '''simple docstring''' return 100 @property def UpperCAmelCase_ ( self ): '''simple docstring''' torch.manual_seed(0 ) lowercase__ : Any= { "in_channels": 8, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image_hint", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 
1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } lowercase__ : str= UNetaDConditionModel(**__lowerCamelCase ) return model @property def UpperCAmelCase_ ( self ): '''simple docstring''' return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def UpperCAmelCase_ ( self ): '''simple docstring''' torch.manual_seed(0 ) lowercase__ : Any= VQModel(**self.dummy_movq_kwargs ) return model def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= self.dummy_unet lowercase__ : Union[str, Any]= self.dummy_movq lowercase__ : Optional[Any]= { "num_train_timesteps": 1000, "beta_schedule": "linear", "beta_start": 0.0_00_85, "beta_end": 0.0_12, "clip_sample": False, "set_alpha_to_one": False, "steps_offset": 0, "prediction_type": "epsilon", "thresholding": False, } lowercase__ : List[Any]= DDIMScheduler(**__lowerCamelCase ) lowercase__ : Dict= { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def UpperCAmelCase_ ( self , snake_case__ , snake_case__=0 ): '''simple docstring''' lowercase__ : List[Any]= floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase ) lowercase__ : Union[str, Any]= floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( __lowerCamelCase ) # create init_image lowercase__ : List[str]= floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCamelCase ) 
).to(__lowerCamelCase ) lowercase__ : str= image.cpu().permute(0 , 2 , 3 , 1 )[0] lowercase__ : Optional[Any]= Image.fromarray(np.uinta(__lowerCamelCase ) ).convert("RGB" ).resize((256, 256) ) # create hint lowercase__ : Union[str, Any]= floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase ) if str(__lowerCamelCase ).startswith("mps" ): lowercase__ : List[str]= torch.manual_seed(__lowerCamelCase ) else: lowercase__ : str= torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase ) lowercase__ : str= { "image": init_image, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "hint": hint, "generator": generator, "height": 64, "width": 64, "num_inference_steps": 10, "guidance_scale": 7.0, "strength": 0.2, "output_type": "np", } return inputs def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Any= "cpu" lowercase__ : Any= self.get_dummy_components() lowercase__ : Union[str, Any]= self.pipeline_class(**__lowerCamelCase ) lowercase__ : str= pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) lowercase__ : str= pipe(**self.get_dummy_inputs(__lowerCamelCase ) ) lowercase__ : List[str]= output.images lowercase__ : int= pipe( **self.get_dummy_inputs(__lowerCamelCase ) , return_dict=__lowerCamelCase , )[0] lowercase__ : Optional[Any]= image[0, -3:, -3:, -1] lowercase__ : Optional[int]= image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase__ : Any= np.array( [0.54_98_50_34, 0.55_50_93_65, 0.52_56_15_04, 0.5_57_04_94, 0.5_59_38_18, 0.5_26_39_79, 0.50_28_56_43, 0.5_06_98_46, 0.51_19_67_36] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class 
__UpperCAmelCase( unittest.TestCase ): """simple docstring""" def UpperCAmelCase_ ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy" ) lowercase__ : Tuple= load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) lowercase__ : List[str]= init_image.resize((512, 512) ) lowercase__ : Optional[Any]= load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/hint_image_cat.png" ) lowercase__ : Optional[Any]= torch.from_numpy(np.array(__lowerCamelCase ) ).float() / 255.0 lowercase__ : Optional[Any]= hint.permute(2 , 0 , 1 ).unsqueeze(0 ) lowercase__ : Optional[Any]= "A robot, 4k photo" lowercase__ : str= KandinskyVaaPriorEmbaEmbPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa ) pipe_prior.to(__lowerCamelCase ) lowercase__ : int= KandinskyVaaControlnetImgaImgPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa ) lowercase__ : List[Any]= pipeline.to(__lowerCamelCase ) pipeline.set_progress_bar_config(disable=__lowerCamelCase ) lowercase__ : str= torch.Generator(device="cpu" ).manual_seed(0 ) lowercase__ : List[str]= pipe_prior( __lowerCamelCase , image=__lowerCamelCase , strength=0.85 , generator=__lowerCamelCase , negative_prompt="" , ).to_tuple() lowercase__ : Dict= pipeline( image=__lowerCamelCase , image_embeds=__lowerCamelCase , negative_image_embeds=__lowerCamelCase , hint=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type="np" , ) lowercase__ : List[Any]= output.images[0] assert image.shape == (512, 512, 3) 
assert_mean_pixel_difference(__lowerCamelCase , __lowerCamelCase )
705
"""simple docstring""" def lowercase__(A ) ->list: """simple docstring""" if n_term == "": return [] lowercase__ : list= [] for temp in range(int(A ) ): series.append(f'''1/{temp + 1}''' if series else "1" ) return series if __name__ == "__main__": a : Dict = input("""Enter the last number (nth term) of the Harmonic Series""") print("""Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n""") print(harmonic_series(nth_term))
85
0
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionSAGPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __UpperCAmelCase( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ): """simple docstring""" __lowerCamelCase = StableDiffusionSAGPipeline __lowerCamelCase = TEXT_TO_IMAGE_PARAMS __lowerCamelCase = TEXT_TO_IMAGE_BATCH_PARAMS __lowerCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS __lowerCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS __lowerCamelCase = False def UpperCAmelCase_ ( self ): '''simple docstring''' torch.manual_seed(0 ) lowercase__ : str= UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) lowercase__ : Dict= DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , ) torch.manual_seed(0 ) lowercase__ : Union[str, Any]= AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) torch.manual_seed(0 ) lowercase__ : Tuple= CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , 
vocab_size=1000 , ) lowercase__ : List[Any]= CLIPTextModel(UpperCAmelCase_ ) lowercase__ : Any= CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) lowercase__ : List[Any]= { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def UpperCAmelCase_ ( self , snake_case__ , snake_case__=0 ): '''simple docstring''' if str(UpperCAmelCase_ ).startswith("mps" ): lowercase__ : Union[str, Any]= torch.manual_seed(UpperCAmelCase_ ) else: lowercase__ : Optional[int]= torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ ) lowercase__ : Optional[Any]= { 'prompt': '.', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 1.0, 'sag_scale': 1.0, 'output_type': 'numpy', } return inputs def UpperCAmelCase_ ( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class __UpperCAmelCase( unittest.TestCase ): """simple docstring""" def UpperCAmelCase_ ( self ): '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : str= StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" ) lowercase__ : Optional[Any]= sag_pipe.to(UpperCAmelCase_ ) sag_pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowercase__ : Dict= '.' 
lowercase__ : Any= torch.manual_seed(0 ) lowercase__ : Dict= sag_pipe( [prompt] , generator=UpperCAmelCase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" ) lowercase__ : List[str]= output.images lowercase__ : List[Any]= image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowercase__ : Optional[Any]= np.array([0.15_68, 0.17_38, 0.16_95, 0.16_93, 0.15_07, 0.17_05, 0.15_47, 0.17_51, 0.19_49] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2 def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[int]= StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" ) lowercase__ : List[Any]= sag_pipe.to(UpperCAmelCase_ ) sag_pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowercase__ : Any= '.' lowercase__ : List[Any]= torch.manual_seed(0 ) lowercase__ : List[str]= sag_pipe( [prompt] , generator=UpperCAmelCase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" ) lowercase__ : Dict= output.images lowercase__ : List[Any]= image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowercase__ : List[Any]= np.array([0.34_59, 0.28_76, 0.25_37, 0.30_02, 0.26_71, 0.21_60, 0.30_26, 0.22_62, 0.23_71] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2 def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" ) lowercase__ : List[str]= sag_pipe.to(UpperCAmelCase_ ) sag_pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowercase__ : List[str]= '.' lowercase__ : List[str]= torch.manual_seed(0 ) lowercase__ : List[Any]= sag_pipe( [prompt] , width=768 , height=512 , generator=UpperCAmelCase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" , ) lowercase__ : List[Any]= output.images assert image.shape == (1, 512, 768, 3)
706
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a : int = logging.get_logger(__name__) a : str = { """google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""", """google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""", """google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""", # See all BigBird models at https://huggingface.co/models?filter=big_bird } class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = "big_bird" def __init__( self , snake_case__=50358 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu_new" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=4096 , snake_case__=2 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=True , snake_case__=0 , snake_case__=1 , snake_case__=2 , snake_case__=66 , snake_case__="block_sparse" , snake_case__=True , snake_case__=False , snake_case__=64 , snake_case__=3 , snake_case__=None , **snake_case__ , ): '''simple docstring''' super().__init__( pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , sep_token_id=snake_case__ , **snake_case__ , ) lowercase__ : Dict= vocab_size lowercase__ : Optional[int]= max_position_embeddings lowercase__ : List[Any]= hidden_size lowercase__ : List[str]= num_hidden_layers lowercase__ : List[str]= num_attention_heads lowercase__ : Optional[int]= intermediate_size lowercase__ : Optional[int]= hidden_act lowercase__ : Tuple= hidden_dropout_prob lowercase__ : int= attention_probs_dropout_prob lowercase__ : int= initializer_range lowercase__ : List[Any]= type_vocab_size lowercase__ : Union[str, Any]= layer_norm_eps lowercase__ : Optional[Any]= 
use_cache lowercase__ : Union[str, Any]= rescale_embeddings lowercase__ : Union[str, Any]= attention_type lowercase__ : Any= use_bias lowercase__ : List[Any]= block_size lowercase__ : Optional[Any]= num_random_blocks lowercase__ : Optional[int]= classifier_dropout class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" @property def UpperCAmelCase_ ( self ): '''simple docstring''' if self.task == "multiple-choice": lowercase__ : List[Any]= {0: "batch", 1: "choice", 2: "sequence"} else: lowercase__ : Tuple= {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
85
0
"""simple docstring""" import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin class __UpperCAmelCase( unittest.TestCase , UpperCAmelCase_ ): """simple docstring""" def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[str]= load_tool("text-classification" ) self.tool.setup() lowercase__ : Dict= load_tool("text-classification" , remote=_lowercase ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Tuple= self.tool("That\'s quite cool" , ["positive", "negative"] ) self.assertEqual(_lowercase , "positive" ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[Any]= self.remote_tool("That\'s quite cool" , ["positive", "negative"] ) self.assertEqual(_lowercase , "positive" ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Tuple= self.tool(text="That\'s quite cool" , labels=["positive", "negative"] ) self.assertEqual(_lowercase , "positive" ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Any= self.remote_tool(text="That\'s quite cool" , labels=["positive", "negative"] ) self.assertEqual(_lowercase , "positive" )
707
"""simple docstring""" from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
85
0
def lowercase__(A , A , A ) ->float: """simple docstring""" lowercase__ : Union[str, Any]= (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff) # formula for sum of series return total def lowercase__() ->Union[str, Any]: """simple docstring""" print(sum_of_series(1 , 1 , 10 ) ) if __name__ == "__main__": import doctest doctest.testmod()
708
"""simple docstring""" from unittest.mock import patch import pyspark from datasets.packaged_modules.spark.spark import ( Spark, SparkExamplesIterable, _generate_iterable_examples, ) from ..utils import ( require_dill_gt_0_3_2, require_not_windows, ) def lowercase__(A , A ) ->List[Any]: """simple docstring""" lowercase__ : str= [] for part_id in partition_order: lowercase__ : int= df.where(f'''SPARK_PARTITION_ID() = {part_id}''' ).collect() for row_idx, row in enumerate(A ): expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()) ) return expected_row_ids_and_row_dicts @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->str: """simple docstring""" lowercase__ : Optional[Any]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Tuple= spark.range(100 ).repartition(1 ) lowercase__ : Dict= Spark(A ) # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means # that each partition can hold 2 rows. spark_builder._repartition_df_if_needed(max_shard_size=16 ) # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions. assert spark_builder.df.rdd.getNumPartitions() == 50 @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->Tuple: """simple docstring""" lowercase__ : Union[str, Any]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Dict= spark.range(10 ).repartition(2 ) lowercase__ : Optional[Any]= [1, 0] lowercase__ : List[str]= _generate_iterable_examples(A , A ) # Reverse the partitions. 
lowercase__ : int= _get_expected_row_ids_and_row_dicts_for_partition_order(A , A ) for i, (row_id, row_dict) in enumerate(generate_fn() ): lowercase__, lowercase__ : Any= expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->int: """simple docstring""" lowercase__ : int= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Dict= spark.range(10 ).repartition(1 ) lowercase__ : str= SparkExamplesIterable(A ) assert it.n_shards == 1 for i, (row_id, row_dict) in enumerate(A ): assert row_id == f'''0_{i}''' assert row_dict == {"id": i} @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->str: """simple docstring""" lowercase__ : List[str]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : int= spark.range(30 ).repartition(3 ) # Mock the generator so that shuffle reverses the partition indices. 
with patch("numpy.random.Generator" ) as generator_mock: lowercase__ : Optional[Any]= lambda A : x.reverse() lowercase__ : Tuple= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [2, 1, 0] ) lowercase__ : List[str]= SparkExamplesIterable(A ).shuffle_data_sources(A ) assert shuffled_it.n_shards == 3 for i, (row_id, row_dict) in enumerate(A ): lowercase__, lowercase__ : str= expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->Any: """simple docstring""" lowercase__ : Dict= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Union[str, Any]= spark.range(20 ).repartition(4 ) # Partitions 0 and 2 lowercase__ : Optional[int]= SparkExamplesIterable(A ).shard_data_sources(worker_id=0 , num_workers=2 ) assert shard_it_a.n_shards == 2 lowercase__ : Union[str, Any]= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [0, 2] ) for i, (row_id, row_dict) in enumerate(A ): lowercase__, lowercase__ : Tuple= expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict # Partitions 1 and 3 lowercase__ : Tuple= SparkExamplesIterable(A ).shard_data_sources(worker_id=1 , num_workers=2 ) assert shard_it_a.n_shards == 2 lowercase__ : List[Any]= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [1, 3] ) for i, (row_id, row_dict) in enumerate(A ): lowercase__, lowercase__ : Dict= expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->Tuple: """simple docstring""" lowercase__ : Any= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Tuple= spark.range(100 ).repartition(1 ) lowercase__ : Optional[int]= Spark(A ) # Choose a small max_shard_size for maximum partitioning. 
spark_builder._repartition_df_if_needed(max_shard_size=1 ) # The new number of partitions should not be greater than the number of rows. assert spark_builder.df.rdd.getNumPartitions() == 100
85
0
import tempfile import unittest import numpy as np from diffusers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionPipeline, PNDMScheduler, ) from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , unittest.TestCase ): """simple docstring""" __lowerCamelCase = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline" def UpperCAmelCase_ ( self , snake_case__=0 ): '''simple docstring''' lowercase__ : int= np.random.RandomState(_lowerCAmelCase ) lowercase__ : Union[str, Any]= { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[Any]= OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase__ : str= self.get_dummy_inputs() lowercase__ : List[Any]= pipe(**_lowerCAmelCase ).images lowercase__ : str= image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) lowercase__ : List[str]= np.array([0.6_50_72, 0.5_84_92, 0.4_82_19, 0.5_55_21, 0.5_31_80, 0.5_59_39, 0.5_06_97, 0.3_98_00, 0.4_64_55] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[int]= OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) lowercase__ : Any= PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase__ : str= self.get_dummy_inputs() lowercase__ : Dict= 
pipe(**_lowerCAmelCase ).images lowercase__ : str= image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) lowercase__ : Optional[Any]= np.array([0.6_58_63, 0.5_94_25, 0.4_93_26, 0.5_63_13, 0.5_38_75, 0.5_66_27, 0.5_10_65, 0.3_97_77, 0.4_63_30] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Union[str, Any]= OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) lowercase__ : Union[str, Any]= LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase__ : Optional[Any]= self.get_dummy_inputs() lowercase__ : Optional[Any]= pipe(**_lowerCAmelCase ).images lowercase__ : List[Any]= image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) lowercase__ : int= np.array([0.5_37_55, 0.6_07_86, 0.4_74_02, 0.4_94_88, 0.5_18_69, 0.4_98_19, 0.4_79_85, 0.3_89_57, 0.4_42_79] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : str= OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) lowercase__ : Any= EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase__ : Any= self.get_dummy_inputs() lowercase__ : List[Any]= pipe(**_lowerCAmelCase ).images lowercase__ : Optional[int]= image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) lowercase__ : Tuple= np.array([0.5_37_55, 0.6_07_86, 0.4_74_02, 0.4_94_88, 0.5_18_69, 0.4_98_19, 0.4_79_85, 0.3_89_57, 0.4_42_79] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : int= OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) lowercase__ : str= EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config 
) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase__ : Optional[Any]= self.get_dummy_inputs() lowercase__ : int= pipe(**_lowerCAmelCase ).images lowercase__ : str= image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) lowercase__ : Any= np.array([0.5_38_17, 0.6_08_12, 0.4_73_84, 0.4_95_30, 0.5_18_94, 0.4_98_14, 0.4_79_84, 0.3_89_58, 0.4_42_71] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Tuple= OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) lowercase__ : Optional[Any]= DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase__ : Any= self.get_dummy_inputs() lowercase__ : Any= pipe(**_lowerCAmelCase ).images lowercase__ : List[str]= image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) lowercase__ : List[str]= np.array([0.5_38_95, 0.6_08_08, 0.4_79_33, 0.4_96_08, 0.5_18_86, 0.4_99_50, 0.4_80_53, 0.3_89_57, 0.4_42_00] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Any= OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase__ : List[str]= self.get_dummy_inputs() lowercase__ : List[Any]= 3 * [inputs["prompt"]] # forward lowercase__ : Optional[int]= pipe(**_lowerCAmelCase ) lowercase__ : str= output.images[0, -3:, -3:, -1] lowercase__ : int= self.get_dummy_inputs() lowercase__ : int= 3 * [inputs.pop("prompt" )] lowercase__ : int= pipe.tokenizer( _lowerCAmelCase , padding="max_length" , max_length=pipe.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors="np" , ) lowercase__ : Optional[int]= text_inputs["input_ids"] lowercase__ : Any= pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] lowercase__ : 
Tuple= prompt_embeds # forward lowercase__ : Any= pipe(**_lowerCAmelCase ) lowercase__ : List[str]= output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4 def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Union[str, Any]= OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase__ : Any= self.get_dummy_inputs() lowercase__ : int= 3 * ["this is a negative prompt"] lowercase__ : Union[str, Any]= negative_prompt lowercase__ : Optional[Any]= 3 * [inputs["prompt"]] # forward lowercase__ : str= pipe(**_lowerCAmelCase ) lowercase__ : Tuple= output.images[0, -3:, -3:, -1] lowercase__ : int= self.get_dummy_inputs() lowercase__ : Union[str, Any]= 3 * [inputs.pop("prompt" )] lowercase__ : Any= [] for p in [prompt, negative_prompt]: lowercase__ : Tuple= pipe.tokenizer( _lowerCAmelCase , padding="max_length" , max_length=pipe.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors="np" , ) lowercase__ : Optional[Any]= text_inputs["input_ids"] embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] ) lowercase__, lowercase__ : Optional[Any]= embeds # forward lowercase__ : List[str]= pipe(**_lowerCAmelCase ) lowercase__ : Optional[Any]= output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4 @nightly @require_onnxruntime @require_torch_gpu class __UpperCAmelCase( unittest.TestCase ): """simple docstring""" @property def UpperCAmelCase_ ( self ): '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[str]= ort.SessionOptions() lowercase__ : Dict= False return options def UpperCAmelCase_ ( self ): '''simple docstring''' # using the PNDM 
scheduler by default lowercase__ : Optional[Any]= OnnxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase__ : Optional[Any]= "A painting of a squirrel eating a burger" np.random.seed(0 ) lowercase__ : int= sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="np" ) lowercase__ : Any= output.images lowercase__ : Optional[int]= image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowercase__ : Dict= np.array([0.04_52, 0.03_90, 0.00_87, 0.03_50, 0.06_17, 0.03_64, 0.05_44, 0.05_23, 0.07_20] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[int]= DDIMScheduler.from_pretrained( "runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" ) lowercase__ : List[Any]= OnnxStableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase__ : Optional[Any]= "open neural network exchange" lowercase__ : Optional[Any]= np.random.RandomState(0 ) lowercase__ : Dict= sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCAmelCase , output_type="np" ) lowercase__ : List[str]= output.images lowercase__ : str= image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowercase__ : Optional[int]= np.array([0.28_67, 0.19_74, 0.14_81, 0.72_94, 0.72_51, 0.66_67, 0.41_94, 0.56_42, 0.64_86] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : 
List[str]= LMSDiscreteScheduler.from_pretrained( "runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" ) lowercase__ : Tuple= OnnxStableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase__ : Union[str, Any]= "open neural network exchange" lowercase__ : str= np.random.RandomState(0 ) lowercase__ : Optional[int]= sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCAmelCase , output_type="np" ) lowercase__ : Tuple= output.images lowercase__ : Optional[int]= image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowercase__ : List[Any]= np.array([0.23_06, 0.19_59, 0.15_93, 0.65_49, 0.63_94, 0.54_08, 0.50_65, 0.60_10, 0.61_61] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Union[str, Any]= 0 def test_callback_fn(snake_case__ , snake_case__ , snake_case__ ) -> None: lowercase__ : List[Any]= True nonlocal number_of_steps number_of_steps += 1 if step == 0: assert latents.shape == (1, 4, 64, 64) lowercase__ : List[Any]= latents[0, -3:, -3:, -1] lowercase__ : str= np.array( [-0.67_72, -0.38_35, -1.24_56, 0.19_05, -1.09_74, 0.69_67, -1.93_53, 0.01_78, 1.01_67] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3 elif step == 5: assert latents.shape == (1, 4, 64, 64) lowercase__ : int= latents[0, -3:, -3:, -1] lowercase__ : int= np.array( [-0.33_51, 0.22_41, -0.18_37, -0.23_25, -0.65_77, 0.33_93, -0.02_41, 0.58_99, 1.38_75] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3 lowercase__ : int= False lowercase__ : Dict= OnnxStableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" , revision="onnx" , 
safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) lowercase__ : Tuple= "Andromeda galaxy in a bottle" lowercase__ : Union[str, Any]= np.random.RandomState(0 ) pipe( prompt=_lowerCAmelCase , num_inference_steps=5 , guidance_scale=7.5 , generator=_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=1 , ) assert test_callback_fn.has_been_called assert number_of_steps == 6 def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[int]= OnnxStableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" , revision="onnx" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) assert pipe.safety_checker is None lowercase__ : Optional[int]= pipe("example prompt" , num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(_lowerCAmelCase ) lowercase__ : Union[str, Any]= OnnxStableDiffusionPipeline.from_pretrained(_lowerCAmelCase ) # sanity check that the pipeline still works assert pipe.safety_checker is None lowercase__ : Tuple= pipe("example prompt" , num_inference_steps=2 ).images[0] assert image is not None
709
"""simple docstring""" import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=False , snake_case__=False , snake_case__=False , snake_case__=2 , snake_case__=99 , snake_case__=0 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=12 , snake_case__=2 , snake_case__=0.02 , snake_case__=3 , snake_case__=4 , snake_case__="last" , snake_case__=None , snake_case__=None , ): '''simple docstring''' lowercase__ : Optional[int]= parent lowercase__ : Tuple= batch_size lowercase__ : Tuple= seq_length lowercase__ : str= is_training lowercase__ : str= use_input_lengths lowercase__ : Any= use_token_type_ids lowercase__ : List[Any]= use_labels lowercase__ : Optional[int]= gelu_activation lowercase__ : str= sinusoidal_embeddings lowercase__ : List[str]= causal lowercase__ : Any= asm lowercase__ : Optional[int]= n_langs lowercase__ : Union[str, Any]= vocab_size lowercase__ : int= n_special lowercase__ : Any= hidden_size lowercase__ : int= num_hidden_layers lowercase__ : 
List[str]= num_attention_heads lowercase__ : List[str]= hidden_dropout_prob lowercase__ : str= attention_probs_dropout_prob lowercase__ : Any= max_position_embeddings lowercase__ : List[Any]= type_vocab_size lowercase__ : int= type_sequence_label_size lowercase__ : Any= initializer_range lowercase__ : Optional[int]= num_labels lowercase__ : Union[str, Any]= num_choices lowercase__ : List[Any]= summary_type lowercase__ : Optional[int]= use_proj lowercase__ : int= scope def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__ : Dict= random_attention_mask([self.batch_size, self.seq_length] ) lowercase__ : Tuple= None if self.use_input_lengths: lowercase__ : List[Any]= ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length lowercase__ : Tuple= None if self.use_token_type_ids: lowercase__ : Any= ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) lowercase__ : str= None lowercase__ : Tuple= None lowercase__ : Dict= None if self.use_labels: lowercase__ : Optional[Any]= ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ : Optional[Any]= ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase__ : Tuple= ids_tensor([self.batch_size] , 2 ).float() lowercase__ : Tuple= ids_tensor([self.batch_size] , self.num_choices ) lowercase__ : List[Any]= self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def UpperCAmelCase_ ( self ): '''simple docstring''' return FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , 
sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : Any= FlaubertModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : str= model(snake_case__ , lengths=snake_case__ , langs=snake_case__ ) lowercase__ : str= model(snake_case__ , langs=snake_case__ ) lowercase__ : Any= model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : str= FlaubertWithLMHeadModel(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : Optional[Any]= model(snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : str= FlaubertForQuestionAnsweringSimple(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : List[str]= model(snake_case__ ) lowercase__ : Dict= model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) 
) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : List[Any]= FlaubertForQuestionAnswering(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : Dict= model(snake_case__ ) lowercase__ : Any= model( snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , p_mask=snake_case__ , ) lowercase__ : List[str]= model( snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , ) ((lowercase__), ) : Optional[Any]= result_with_labels.to_tuple() lowercase__ : Union[str, Any]= model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ ) ((lowercase__), ) : List[Any]= result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : List[str]= FlaubertForSequenceClassification(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : Optional[Any]= model(snake_case__ ) lowercase__ : Optional[Any]= model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.loss.shape , () ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : List[Any]= self.num_labels lowercase__ : Union[str, Any]= FlaubertForTokenClassification(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : int= model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : int= self.num_choices lowercase__ : str= FlaubertForMultipleChoice(config=snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : Dict= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ : int= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ : str= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ : Any= model( snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[Any]= self.prepare_config_and_inputs() ( ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ) : Any= config_and_inputs lowercase__ : Tuple= { "input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths, "attention_mask": input_mask, } return config, inputs_dict @require_torch class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , 
SCREAMING_SNAKE_CASE__ , unittest.TestCase ): """simple docstring""" __lowerCamelCase = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) __lowerCamelCase = ( { "feature-extraction": FlaubertModel, "fill-mask": FlaubertWithLMHeadModel, "question-answering": FlaubertForQuestionAnsweringSimple, "text-classification": FlaubertForSequenceClassification, "token-classification": FlaubertForTokenClassification, "zero-shot": FlaubertForSequenceClassification, } if is_torch_available() else {} ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__=False ): '''simple docstring''' lowercase__ : Tuple= super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": lowercase__ : List[Any]= torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=snake_case__ ) lowercase__ : List[str]= torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=snake_case__ ) return inputs_dict def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= FlaubertModelTester(self ) lowercase__ : List[str]= ConfigTester(self , config_class=snake_case__ , emb_dim=37 ) def UpperCAmelCase_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Any= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[int]= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Tuple= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[str]= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Union[str, Any]= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*snake_case__ ) def UpperCAmelCase_ ( self ): 
'''simple docstring''' lowercase__ : Tuple= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*snake_case__ ) @slow def UpperCAmelCase_ ( self ): '''simple docstring''' for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : List[str]= FlaubertModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) @slow @require_torch_gpu def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__, lowercase__ : Optional[Any]= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. if model_class == FlaubertForMultipleChoice: return lowercase__ : int= True lowercase__ : List[Any]= model_class(config=snake_case__ ) lowercase__ : str= self._prepare_for_class(snake_case__ , snake_case__ ) lowercase__ : Dict= torch.jit.trace( snake_case__ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(snake_case__ , os.path.join(snake_case__ , "traced_model.pt" ) ) lowercase__ : str= torch.jit.load(os.path.join(snake_case__ , "traced_model.pt" ) , map_location=snake_case__ ) loaded(inputs_dict["input_ids"].to(snake_case__ ) , inputs_dict["attention_mask"].to(snake_case__ ) ) @require_torch class __UpperCAmelCase( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= FlaubertModel.from_pretrained("flaubert/flaubert_base_cased" ) lowercase__ : Tuple= torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) with torch.no_grad(): lowercase__ : Optional[int]= model(snake_case__ )[0] lowercase__ : Optional[int]= torch.Size((1, 11, 768) 
) self.assertEqual(output.shape , snake_case__ ) lowercase__ : Dict= torch.tensor( [[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1e-4 ) )
85
0
"""simple docstring""" from typing import List from .keymap import KEYMAP, get_character def lowercase__(A ): """simple docstring""" def decorator(A ): lowercase__ : Dict= getattr(lowerCamelCase__ , "handle_key" , [] ) handle += [key] setattr(lowerCamelCase__ , "handle_key" , lowerCamelCase__ ) return func return decorator def lowercase__(*A ): """simple docstring""" def decorator(A ): lowercase__ : Dict= getattr(lowerCamelCase__ , "handle_key" , [] ) handle += keys setattr(lowerCamelCase__ , "handle_key" , lowerCamelCase__ ) return func return decorator class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def __new__( cls , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' lowercase__ : Union[str, Any]= super().__new__(cls , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) if not hasattr(UpperCamelCase_ , "key_handler" ): setattr(UpperCamelCase_ , "key_handler" , {} ) setattr(UpperCamelCase_ , "handle_input" , KeyHandler.handle_input ) for value in attrs.values(): lowercase__ : List[Any]= getattr(UpperCamelCase_ , "handle_key" , [] ) for key in handled_keys: lowercase__ : Dict= value return new_cls @staticmethod def UpperCAmelCase_ ( cls ): '''simple docstring''' lowercase__ : str= get_character() if char != KEYMAP["undefined"]: lowercase__ : Tuple= ord(UpperCamelCase_ ) lowercase__ : str= cls.key_handler.get(UpperCamelCase_ ) if handler: lowercase__ : List[str]= char return handler(cls ) else: return None def lowercase__(cls ): """simple docstring""" return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
710
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


@dataclass
class KarrasVeOutput(BaseOutput):
    """Output of a Karras-VE scheduler step.

    Attributes:
        prev_sample: denoised sample x_{t-1} for the next step.
        derivative: the ODE derivative used for the (corrected) Euler step.
        pred_original_sample: the model's current estimate of x_0, if computed.
    """

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    """Stochastic sampler from Karras et al. (variance-expanding formulation).

    Implements Algorithm 2 (Heun-style predictor/corrector) with churn-based
    noise injection controlled by ``s_churn``/``s_min``/``s_max``/``s_noise``.
    """

    # Second-order scheduler (Heun correction).
    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values (filled in by set_timesteps)
        self.num_inference_steps: Optional[int] = None
        self.timesteps: Optional[torch.Tensor] = None
        self.schedule: Optional[torch.FloatTensor] = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """No input scaling is required for this scheduler; returns sample unchanged."""
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """Precompute the (descending) timestep indices and the sigma schedule."""
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        # Geometric interpolation between sigma_max and sigma_min.
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ) -> Tuple[torch.FloatTensor, float]:
        """Explicit Langevin-like churn: raise sigma to sigma_hat and add matching noise.

        Only applies churn when sigma lies inside [s_min, s_max]; otherwise gamma=0.
        """
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        """First-order (Euler) prediction step from sigma_hat to sigma_prev."""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        """Second-order (Heun) correction using the trapezoidal average of derivatives."""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        # Training-time noising is not defined for this inference-only scheduler.
        raise NotImplementedError()
85
0
import unittest

from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        AlbertForMaskedLM,
        AlbertForMultipleChoice,
        AlbertForPreTraining,
        AlbertForQuestionAnswering,
        AlbertForSequenceClassification,
        AlbertForTokenClassification,
        AlbertModel,
    )
    from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST


class AlbertModelTester:
    """Builds a tiny ALBERT config with random inputs and shape-checks each task head."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random ids/masks/labels of the tester's shapes plus a matching config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Expand every input along a new "choice" axis.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Adds dummy MLM + sentence-order labels for the pretraining head."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        # Loose tolerance: reference values were recorded at float32 precision.
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
711
from ....utils import logging


logger = logging.get_logger(__name__)


class MMBTConfig:
    """Configuration wrapper for a multimodal (MMBT-style) model.

    Adopts every attribute of the wrapped text-model ``config`` and adds the
    modality-encoder hidden size (and optionally a label count) on top.

    Args:
        config: the underlying transformer config whose attributes are shared.
        num_labels: classification label count; only set when truthy.
        modal_hidden_size: hidden size of the non-text modality encoder.
    """

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        # Share the wrapped config's attribute dict so all of its fields
        # (vocab_size, hidden_size, ...) are visible on this object too.
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
85
0
import numpy as np


class Cell:
    """A grid node for A*: position plus path cost g, heuristic h, total f."""

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        # Two cells are "the same" when they occupy the same grid position.
        return self.position == cell.position

    def showcell(self):
        print(self.position)


class Gridworld:
    """A rectangular obstacle-free grid; exposes 8-connected neighbours."""

    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neigbours(self, cell):
        """Return in-bounds neighbour Cells (8 directions) with parent set."""
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x, current_y = cell.position
        neighbours = []
        for dx, dy in neughbour_cord:
            x = current_x + dx
            y = current_y + dy
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                n = Cell()
                n.position = (x, y)
                n.parent = cell
                neighbours.append(n)
        return neighbours


def astar(world, start, goal):
    """A* search from ``start`` to ``goal`` on ``world``.

    Uses unit step cost and squared-Euclidean distance as the heuristic.
    Returns the path as a list of (x, y) positions from start to goal.
    """
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open.pop(int(min_f))
        _closed.append(current)
        if current == goal:
            break
        for n in world.get_neigbours(current):
            # Skip nodes we have already expanded.
            if any(c == n for c in _closed):
                continue
            n.g = current.g + 1
            xa, ya = n.position
            xb, yb = goal.position
            # Squared Euclidean distance to the goal (admissible for display
            # purposes; not scaled against the unit step cost).
            n.h = (yb - ya) ** 2 + (xb - xa) ** 2
            n.f = n.h + n.g
            # Skip if an equal-or-better copy is already queued.
            existing = next((c for c in _open if c == n), None)
            if existing is not None and existing.f <= n.f:
                continue
            _open.append(n)

    # Walk parents back from the goal and reverse into start->goal order.
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]


if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons: mark the path in the grid.
    for i in s:
        world.w[i] = 1
    print(world.w)
712
"""Convert original CvT checkpoints to the Hugging Face format.

Builds a (huggingface_name, original_name) mapping for every parameter,
copies the weights over, and saves model + image processor.
"""
import argparse
import json
from collections import OrderedDict

import torch


def embeddings(idx):
    """(HF, original) name pairs for stage ``idx``'s patch embedding."""
    embed = []
    embed.append(
        (
            f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight",
            f"stage{idx}.patch_embed.proj.weight",
        )
    )
    embed.append(
        (
            f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias",
            f"stage{idx}.patch_embed.proj.bias",
        )
    )
    embed.append(
        (
            f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight",
            f"stage{idx}.patch_embed.norm.weight",
        )
    )
    embed.append(
        (
            f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias",
            f"stage{idx}.patch_embed.norm.bias",
        )
    )
    return embed


def attention(idx, cnt):
    """(HF, original) name pairs for block ``cnt`` of stage ``idx``.

    Order matters only for reproducibility of the printed mapping; the
    weight copy itself is keyed by name.
    """
    attention_weights = []
    prefix = f"cvt.encoder.stages.{idx}.layers.{cnt}"
    target = f"stage{idx}.blocks.{cnt}"

    # Convolutional projections for query/key/value: conv + batch-norm stats.
    for name, letter in (("query", "q"), ("key", "k"), ("value", "v")):
        conv_proj = f"{prefix}.attention.attention.convolution_projection_{name}.convolution_projection"
        attention_weights.append(
            (f"{conv_proj}.convolution.weight", f"{target}.attn.conv_proj_{letter}.conv.weight")
        )
        for param in ("weight", "bias", "running_mean", "running_var", "num_batches_tracked"):
            attention_weights.append(
                (f"{conv_proj}.normalization.{param}", f"{target}.attn.conv_proj_{letter}.bn.{param}")
            )

    # Linear q/k/v projections.
    for name, letter in (("query", "q"), ("key", "k"), ("value", "v")):
        for param in ("weight", "bias"):
            attention_weights.append(
                (
                    f"{prefix}.attention.attention.projection_{name}.{param}",
                    f"{target}.attn.proj_{letter}.{param}",
                )
            )

    # Attention output projection.
    for param in ("weight", "bias"):
        attention_weights.append(
            (f"{prefix}.attention.output.dense.{param}", f"{target}.attn.proj.{param}")
        )

    # MLP and layer norms.
    for src, dst in (
        ("intermediate.dense", "mlp.fc1"),
        ("output.dense", "mlp.fc2"),
        ("layernorm_before", "norm1"),
        ("layernorm_after", "norm2"),
    ):
        for param in ("weight", "bias"):
            attention_weights.append((f"{prefix}.{src}.{param}", f"{target}.{dst}.{param}"))

    return attention_weights


def cls_token(idx):
    """(HF, original) name pair for the CLS token (only the final stage has one)."""
    token = []
    token.append((f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token"))
    return token


def final():
    """(HF, original) name pairs for the final layer norm and classifier head."""
    head = []
    head.append(("layernorm.weight", "norm.weight"))
    head.append(("layernorm.bias", "norm.bias"))
    head.append(("classifier.weight", "head.weight"))
    head.append(("classifier.bias", "head.bias"))
    return head


def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder):
    """Convert the original checkpoint at ``cvt_file_name`` and save to ``pytorch_dump_folder``."""
    # Heavy third-party imports kept local so the pure name-mapping helpers
    # above can be used without transformers/huggingface_hub installed.
    from huggingface_hub import cached_download, hf_hub_url
    from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification

    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for hf_name, orig_name in list_of_state_dict:
        huggingface_weights[hf_name] = original_weights[orig_name]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder)
    image_processor.save_pretrained(pytorch_dump_folder)


# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--cvt_model",
        default="cvt-w24",
        type=str,
        help="Name of the cvt model you'd like to convert.",
    )
    parser.add_argument(
        "--image_size",
        default=384,
        type=int,
        help="Input Image Size",
    )
    parser.add_argument(
        "--cvt_file_name",
        default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
        type=str,
        help="Input Image Size",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
85
0
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union

from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings


logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    """TrainingArguments extended with generation-time evaluation options."""

    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        """Serialize, converting any nested `GenerationConfig` into a plain dict.

        Keeps the result JSON-serializable (e.g. for TensorBoard hparams logging).
        """
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
713
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class ScoreSdeVePipeline(DiffusionPipeline):
    """Unconditional image generation with a score-based (VE-SDE) model.

    Components:
        unet: the score network predicting the noise at each sigma level.
        scheduler: a ScoreSdeVeScheduler driving the predictor/corrector loop.
    """

    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """Run the annealed Langevin predictor/corrector sampling loop.

        Returns a batch of images (PIL by default), as ImagePipelineOutput or
        a plain tuple when ``return_dict=False``.
        """
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # Initial sample ~ N(0, sigma_max^2 * I).
        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step (Langevin dynamics at the current noise level)
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step (reverse-SDE discretization)
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        # Use the noise-free mean of the final step as the output image.
        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
85
0
"""Tests for the `accelerate` example scripts.

DataLoaders built from `test_samples/MRPC` for quick testing.
Should mock `{script_name}.get_dataloaders` via:
@mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock

import torch

from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config


# Feature scripts that have no matching section in the "complete" examples.
EXCLUDE_EXAMPLES = [
    "cross_validation.py",
    "gradient_accumulation.py",
    "local_sgd.py",
    "multi_process_metrics.py",
    "memory.py",
    "automatic_gradient_accumulation.py",
    "fsdp_with_peak_mem_tracking.py",
    "deepspeed_with_config_support.py",
    "megatron_lm_gpt_pretraining.py",
]


class ExampleDifferenceTests(unittest.TestCase):
    """Check that each `by_feature` example stays in sync with the complete examples."""

    def one_complete_example(self, complete_file_name, parser_only, secondary_filename=None, special_strings=None):
        """Diff one complete example against every non-excluded feature script.

        `parser_only` selects whether `main()` or `training_function()` is compared;
        `special_strings` are substrings stripped from the diff before asserting it
        is empty (known, intentional differences).
        """
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name),
                            item_path,
                            parser_only,
                            secondary_filename,
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        # Known formatting differences between the cv example and feature scripts.
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + "\"accuracy\": eval_metric[\"accuracy\"],\n\n",
            " " * 20 + "\"f1\": eval_metric[\"f1\"],\n\n",
            " " * 20 + "\"train_loss\": total_loss.item() / len(train_dataloader),\n\n",
            " " * 20 + "\"epoch\": epoch,\n\n",
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)


@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    """Launch each feature example end-to-end through `accelerate launch`."""

    clear_on_exit = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = F'''
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        '''.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = F'''
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        '''.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = F'''
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, 'epoch_0')}
        '''.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = F'''
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, 'step_2')}
        '''.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        # With several processes the first epoch is skipped entirely on resume.
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = "\n examples/by_feature/cross_validation.py\n --num_folds 2\n ".split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = F'''
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            '''.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
714
"""Exchange sort: a simple O(n^2) comparison sort."""


def lowercase__(A) -> list[int]:
    """Sort the list ``A`` in place into ascending order and return it.

    For each index i, compare with every later index j and swap whenever
    the later element is smaller (exchange sort).
    """
    length = len(A)
    for i in range(length):
        for j in range(i + 1, length):
            if A[j] < A[i]:
                # Swap the out-of-order pair.
                A[j], A[i] = A[i], A[j]
    return A


if __name__ == "__main__":
    a = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in a.split(",")]
    print(lowercase__(unsorted))
85
0
"""Convert DialoGPT ``*_ft.pkl`` checkpoints to the HF Transformers layout.

The only change needed is renaming the LM-head weight key.
"""
import argparse
import os

import torch

try:
    from transformers.utils import WEIGHTS_NAME
except ImportError:  # allow standalone use without transformers installed
    WEIGHTS_NAME = "pytorch_model.bin"

DIALOGPT_MODELS = ["small", "medium", "large"]
OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def lowercase__(checkpoint_path, pytorch_dump_folder_path) -> None:
    """Load the checkpoint, rename OLD_KEY to NEW_KEY, and save it.

    The converted state dict is written as ``WEIGHTS_NAME`` inside
    ``pytorch_dump_folder_path`` (created if missing).
    """
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, F"""{MODEL}_ft.pkl""")
        pytorch_dump_folder_path = F"""./DialoGPT-{MODEL}"""
        lowercase__(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
715
"""Project Euler 10: sum of all primes below a limit."""
import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number: int) -> bool:
    """Trial-division primality test using the 6k +/- 1 optimization."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    """Yield the primes 2, 3, 5, ... in increasing order, forever."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def lowercase__(A: int = 2_000_000) -> int:
    """Return the sum of all primes strictly below ``A``."""
    return sum(takewhile(lambda x: x < A, prime_generator()))


if __name__ == "__main__":
    print(F"""{lowercase__() = }""")
85
0
"""Sigmoid-weighted linear unit (SiLU / swish) implemented with NumPy."""
import numpy as np


def sigmoid(vector):
    """Element-wise logistic sigmoid: 1 / (1 + exp(-x))."""
    return 1 / (1 + np.exp(-vector))


def lowercase__(A):
    """Element-wise SiLU/swish activation: x * sigmoid(x)."""
    return A * sigmoid(A)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
716
"""Project Euler 44: pentagonal numbers whose sum and difference are pentagonal."""


def is_pentagonal(n: int) -> bool:
    """True iff ``n`` is a pentagonal number (inverse-formula root test)."""
    root = (1 + 24 * n) ** 0.5
    # n is pentagonal iff (1 + sqrt(1 + 24n)) / 6 is a whole number.
    return ((1 + root) / 6) % 1 == 0


def lowercase__(A: int = 5_000) -> int:
    """Search the first ``A - 1`` pentagonal numbers for a pair P_i <= P_j
    whose sum and difference are both pentagonal; return that difference,
    or -1 when no such pair exists within the limit."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, A)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(F"""{lowercase__() = }""")
85
0
"""Tests for the Bark processor (tokenization + speaker-embedding handling)."""
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow


@require_torch
class __UpperCAmelCase(unittest.TestCase):
    """Exercises BarkProcessor save/load, voice presets and tokenizer parity."""

    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        """Load the checkpoint's tokenizer, forwarding any extra kwargs."""
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        encoded_processor = processor(text=self.input_string)
        # NOTE(review): the exact tokenizer flags below are reconstructed —
        # confirm against the processor's own tokenization call.
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
717
"""Pix2Struct model configuration classes (text, vision, and combined)."""
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}


class PixaStructTextConfig(PretrainedConfig):
    """Configuration for the Pix2Struct text (decoder) model."""

    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                F"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class PixaStructVisionConfig(PretrainedConfig):
    """Configuration for the Pix2Struct vision (image encoder) model."""

    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                F"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class PixaStructConfig(PretrainedConfig):
    """Combined Pix2Struct configuration wrapping a text and a vision config."""

    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = PixaStructTextConfig(**text_config)
        self.vision_config = PixaStructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        # Propagate the top-level initializer range to the sub-configs.
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Build a combined config from already-instantiated sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


# Backward-compatible alias for the previous (mangled) module-level name.
__UpperCAmelCase = PixaStructConfig
85
0
"""Kandinsky 2.2 ControlNet text-to-image decoder pipeline."""
from typing import List, Optional, Union

import torch

from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    logging,
    randn_tensor,
    replace_example_docstring,
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = "\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)[\"depth\"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline(\"depth-estimation\")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to(\"cuda\")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to(\"cuda\")\n\n\n >>> img = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/cat.png\"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")\n\n >>> prompt = \"A robot, 4k photo\"\n >>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"\n\n >>> generator = torch.Generator(device=\"cuda\").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save(\"robot_cat.png\")\n ```\n"


def downscale_height_and_width(height, width, scale_factor=8):
    """Round (height, width) up to the nearest multiple of scale_factor**2,
    then divide once by scale_factor to get latent dimensions."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


class __UpperCAmelCase(DiffusionPipeline):
    """Decoder pipeline: turns (image_embeds, hint) into images via the MoVQ VAE."""

    def __init__(self, unet, scheduler, movq):
        """Register the denoising UNet, the DDPM scheduler and the MoVQ decoder."""
        super().__init__()
        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        # Spatial downscale factor implied by the MoVQ architecture.
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        """Draw fresh noise (or validate user-provided latents) and scale by init sigma."""
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        """Offload each sub-model to CPU, moving it to GPU only while it runs."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(F"cuda:{gpu_id}")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        """Hook-based offload: each whole sub-model is moved to GPU on use."""
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(F"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        """Device the unet actually executes on (accounts for accelerate hooks)."""
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds,
        negative_image_embeds,
        hint,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        num_images_per_prompt=1,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
    ):
        """Denoise latents conditioned on image embeddings and a control hint.

        Returns an ImagePipelineOutput (or 1-tuple when return_dict=False).
        """
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        # Accept lists of per-prompt tensors as well as pre-batched tensors.
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
718
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM @require_tf @require_sentencepiece @require_tokenizers class __UpperCAmelCase( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : str= TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" ) lowercase__ : str= AutoTokenizer.from_pretrained("google/mt5-small" ) lowercase__ : Tuple= tokenizer("Hello there" , return_tensors="tf" ).input_ids lowercase__ : Optional[Any]= tokenizer("Hi I am" , return_tensors="tf" ).input_ids lowercase__ : Optional[Any]= model(snake_case__ , labels=snake_case__ ).loss lowercase__ : int= -tf.math.reduce_mean(snake_case__ ).numpy() lowercase__ : int= -21.22_81_68 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
85
0
import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class __UpperCAmelCase( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): """simple docstring""" __lowerCamelCase = AutoencoderKL __lowerCamelCase = 'sample' __lowerCamelCase = 1E-2 @property def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= 4 lowercase__ : Optional[int]= 3 lowercase__ : Union[str, Any]= (32, 32) lowercase__ : Union[str, Any]= floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCamelCase ) return {"sample": image} @property def UpperCAmelCase_ ( self ): '''simple docstring''' return (3, 32, 32) @property def UpperCAmelCase_ ( self ): '''simple docstring''' return (3, 32, 32) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : int= { "block_out_channels": [32, 64], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 4, } lowercase__ : List[str]= self.dummy_input return init_dict, inputs_dict def UpperCAmelCase_ ( self ): '''simple docstring''' pass def UpperCAmelCase_ ( self ): '''simple docstring''' pass @unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__, lowercase__ : str= self.prepare_init_args_and_inputs_for_common() lowercase__ : Any= self.model_class(**__UpperCamelCase ) model.to(__UpperCamelCase ) assert not model.is_gradient_checkpointing and model.training lowercase__ : List[str]= model(**__UpperCamelCase ).sample # run 
the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() lowercase__ : List[str]= torch.randn_like(__UpperCamelCase ) lowercase__ : Optional[Any]= (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing lowercase__ : Dict= self.model_class(**__UpperCamelCase ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(__UpperCamelCase ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training lowercase__ : int= model_a(**__UpperCamelCase ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() lowercase__ : List[Any]= (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1e-5 ) lowercase__ : Any= dict(model.named_parameters() ) lowercase__ : Any= dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__, lowercase__ : Any= AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) self.assertEqual(len(loading_info["missing_keys"] ) , 0 ) model.to(__UpperCamelCase ) lowercase__ : Dict= model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Any= AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" ) lowercase__ : List[Any]= model.to(__UpperCamelCase ) model.eval() if torch_device == "mps": lowercase__ : Optional[Any]= torch.manual_seed(0 ) else: lowercase__ : int= torch.Generator(device=__UpperCamelCase ).manual_seed(0 ) 
lowercase__ : Any= torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) lowercase__ : int= image.to(__UpperCamelCase ) with torch.no_grad(): lowercase__ : Any= model(__UpperCamelCase , sample_posterior=__UpperCamelCase , generator=__UpperCamelCase ).sample lowercase__ : Union[str, Any]= output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. if torch_device == "mps": lowercase__ : Optional[int]= torch.tensor( [ -4.0078e-01, -3.8323e-04, -1.2681e-01, -1.1462e-01, 2.0095e-01, 1.0893e-01, -8.8247e-02, -3.0361e-01, -9.8644e-03, ] ) elif torch_device == "cpu": lowercase__ : List[str]= torch.tensor( [-0.13_52, 0.08_78, 0.04_19, -0.08_18, -0.10_69, 0.06_88, -0.14_58, -0.44_46, -0.00_26] ) else: lowercase__ : str= torch.tensor( [-0.24_21, 0.46_42, 0.25_07, -0.04_38, 0.06_82, 0.31_60, -0.20_18, -0.07_27, 0.24_85] ) self.assertTrue(torch_all_close(__UpperCamelCase , __UpperCamelCase , rtol=1e-2 ) ) @slow class __UpperCAmelCase( unittest.TestCase ): """simple docstring""" def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ): '''simple docstring''' return F'''gaussian_noise_s={seed}_shape={'_'.join([str(__UpperCamelCase ) for s in shape] )}.npy''' def UpperCAmelCase_ ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase_ ( self , snake_case__=0 , snake_case__=(4, 3, 512, 512) , snake_case__=False ): '''simple docstring''' lowercase__ : Tuple= torch.floataa if fpaa else torch.floataa lowercase__ : Tuple= torch.from_numpy(load_hf_numpy(self.get_file_format(__UpperCamelCase , __UpperCamelCase ) ) ).to(__UpperCamelCase ).to(__UpperCamelCase ) return image def UpperCAmelCase_ ( self , snake_case__="CompVis/stable-diffusion-v1-4" , snake_case__=False ): '''simple docstring''' lowercase__ : Tuple= "fp16" if fpaa else 
None lowercase__ : Union[str, Any]= torch.floataa if fpaa else torch.floataa lowercase__ : List[Any]= AutoencoderKL.from_pretrained( __UpperCamelCase , subfolder="vae" , torch_dtype=__UpperCamelCase , revision=__UpperCamelCase , ) model.to(__UpperCamelCase ).eval() return model def UpperCAmelCase_ ( self , snake_case__=0 ): '''simple docstring''' if torch_device == "mps": return torch.manual_seed(__UpperCamelCase ) return torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase ) @parameterized.expand( [ # fmt: off [33, [-0.16_03, 0.98_78, -0.04_95, -0.07_90, -0.27_09, 0.83_75, -0.20_60, -0.08_24], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]], [47, [-0.23_76, 0.11_68, 0.13_32, -0.48_40, -0.25_08, -0.07_91, -0.04_93, -0.40_89], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]], # fmt: on ] ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' lowercase__ : Optional[int]= self.get_sd_vae_model() lowercase__ : Optional[int]= self.get_sd_image(__UpperCamelCase ) lowercase__ : Optional[Any]= self.get_generator(__UpperCamelCase ) with torch.no_grad(): lowercase__ : int= model(__UpperCamelCase , generator=__UpperCamelCase , sample_posterior=__UpperCamelCase ).sample assert sample.shape == image.shape lowercase__ : Tuple= sample[-1, -2:, -2:, :2].flatten().float().cpu() lowercase__ : List[str]= torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(__UpperCamelCase , __UpperCamelCase , atol=3e-3 ) @parameterized.expand( [ # fmt: off [33, [-0.05_13, 0.02_89, 1.37_99, 0.21_66, -0.25_73, -0.08_71, 0.51_03, -0.09_99]], [47, [-0.41_28, -0.13_20, -0.37_04, 0.19_65, -0.41_16, -0.23_32, -0.33_40, 0.22_47]], # fmt: on ] ) @require_torch_gpu def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ): '''simple docstring''' lowercase__ : List[str]= self.get_sd_vae_model(fpaa=__UpperCamelCase ) lowercase__ : int= 
self.get_sd_image(__UpperCamelCase , fpaa=__UpperCamelCase ) lowercase__ : Dict= self.get_generator(__UpperCamelCase ) with torch.no_grad(): lowercase__ : Union[str, Any]= model(__UpperCamelCase , generator=__UpperCamelCase , sample_posterior=__UpperCamelCase ).sample assert sample.shape == image.shape lowercase__ : Union[str, Any]= sample[-1, -2:, :2, -2:].flatten().float().cpu() lowercase__ : int= torch.tensor(__UpperCamelCase ) assert torch_all_close(__UpperCamelCase , __UpperCamelCase , atol=1e-2 ) @parameterized.expand( [ # fmt: off [33, [-0.16_09, 0.98_66, -0.04_87, -0.07_77, -0.27_16, 0.83_68, -0.20_55, -0.08_14], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]], [47, [-0.23_77, 0.11_47, 0.13_33, -0.48_41, -0.25_06, -0.08_05, -0.04_91, -0.40_85], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]], # fmt: on ] ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' lowercase__ : int= self.get_sd_vae_model() lowercase__ : Dict= self.get_sd_image(__UpperCamelCase ) with torch.no_grad(): lowercase__ : Union[str, Any]= model(__UpperCamelCase ).sample assert sample.shape == image.shape lowercase__ : str= sample[-1, -2:, -2:, :2].flatten().float().cpu() lowercase__ : str= torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(__UpperCamelCase , __UpperCamelCase , atol=3e-3 ) @parameterized.expand( [ # fmt: off [13, [-0.20_51, -0.18_03, -0.23_11, -0.21_14, -0.32_92, -0.35_74, -0.29_53, -0.33_23]], [37, [-0.26_32, -0.26_25, -0.21_99, -0.27_41, -0.45_39, -0.49_90, -0.37_20, -0.49_25]], # fmt: on ] ) @require_torch_gpu def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ): '''simple docstring''' lowercase__ : Optional[int]= self.get_sd_vae_model() lowercase__ : List[Any]= self.get_sd_image(__UpperCamelCase , shape=(3, 4, 64, 64) ) with torch.no_grad(): lowercase__ : Any= model.decode(__UpperCamelCase ).sample assert 
list(sample.shape ) == [3, 3, 512, 512] lowercase__ : Dict= sample[-1, -2:, :2, -2:].flatten().cpu() lowercase__ : List[Any]= torch.tensor(__UpperCamelCase ) assert torch_all_close(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) @parameterized.expand( [ # fmt: off [27, [-0.03_69, 0.02_07, -0.07_76, -0.06_82, -0.17_47, -0.19_30, -0.14_65, -0.20_39]], [16, [-0.16_28, -0.21_34, -0.27_47, -0.26_42, -0.37_74, -0.44_04, -0.36_87, -0.42_77]], # fmt: on ] ) @require_torch_gpu def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ): '''simple docstring''' lowercase__ : Any= self.get_sd_vae_model(fpaa=__UpperCamelCase ) lowercase__ : str= self.get_sd_image(__UpperCamelCase , shape=(3, 4, 64, 64) , fpaa=__UpperCamelCase ) with torch.no_grad(): lowercase__ : Optional[Any]= model.decode(__UpperCamelCase ).sample assert list(sample.shape ) == [3, 3, 512, 512] lowercase__ : Optional[int]= sample[-1, -2:, :2, -2:].flatten().float().cpu() lowercase__ : Optional[Any]= torch.tensor(__UpperCamelCase ) assert torch_all_close(__UpperCamelCase , __UpperCamelCase , atol=5e-3 ) @parameterized.expand([(13,), (16,), (27,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." 
) def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' lowercase__ : List[str]= self.get_sd_vae_model(fpaa=__UpperCamelCase ) lowercase__ : List[Any]= self.get_sd_image(__UpperCamelCase , shape=(3, 4, 64, 64) , fpaa=__UpperCamelCase ) with torch.no_grad(): lowercase__ : Optional[int]= model.decode(__UpperCamelCase ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): lowercase__ : Any= model.decode(__UpperCamelCase ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(__UpperCamelCase , __UpperCamelCase , atol=1e-1 ) @parameterized.expand([(13,), (16,), (37,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." ) def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' lowercase__ : List[Any]= self.get_sd_vae_model() lowercase__ : Tuple= self.get_sd_image(__UpperCamelCase , shape=(3, 4, 64, 64) ) with torch.no_grad(): lowercase__ : List[Any]= model.decode(__UpperCamelCase ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): lowercase__ : List[str]= model.decode(__UpperCamelCase ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(__UpperCamelCase , __UpperCamelCase , atol=1e-2 ) @parameterized.expand( [ # fmt: off [33, [-0.30_01, 0.09_18, -2.69_84, -3.97_20, -3.20_99, -5.03_53, 1.73_38, -0.20_65, 3.42_67]], [47, [-1.50_30, -4.38_71, -6.03_55, -9.11_57, -1.66_61, -2.78_53, 2.16_07, -5.08_23, 2.56_33]], # fmt: on ] ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ): '''simple docstring''' lowercase__ : Tuple= self.get_sd_vae_model() lowercase__ : Optional[Any]= self.get_sd_image(__UpperCamelCase ) lowercase__ : Any= self.get_generator(__UpperCamelCase ) with torch.no_grad(): lowercase__ : Any= model.encode(__UpperCamelCase ).latent_dist lowercase__ : Optional[int]= dist.sample(generator=__UpperCamelCase ) assert list(sample.shape ) == [image.shape[0], 
4] + [i // 8 for i in image.shape[2:]] lowercase__ : int= sample[0, -1, -3:, -3:].flatten().cpu() lowercase__ : str= torch.tensor(__UpperCamelCase ) lowercase__ : Dict= 3e-3 if torch_device != "mps" else 1e-2 assert torch_all_close(__UpperCamelCase , __UpperCamelCase , atol=__UpperCamelCase )
719
"""BridgeTower processor: pairs a BridgeTowerImageProcessor with a Roberta tokenizer."""
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
    """Combined processor that tokenizes text and preprocesses images for BridgeTower.

    Wraps a ``BridgeTowerImageProcessor`` and a ``RobertaTokenizer(Fast)`` behind a
    single ``__call__`` returning one merged ``BatchEncoding``.
    """

    __lowerCamelCase = ["image_processor", "tokenizer"]
    __lowerCamelCase = "BridgeTowerImageProcessor"
    __lowerCamelCase = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        # BUG FIX: the previous signature reused a single name for both
        # parameters (``snake_case__, snake_case__``), which is a SyntaxError
        # ("duplicate argument in function definition") — the class could not
        # even be imported. Parameter names restored per the upstream API.
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text=None,
        add_special_tokens=True,
        padding=False,
        truncation=None,
        max_length=None,
        stride=0,
        pad_to_multiple_of=None,
        return_token_type_ids=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_length=False,
        verbose=True,
        return_tensors=None,
        **kwargs,
    ):
        """Tokenize ``text`` and preprocess ``images`` into one ``BatchEncoding``.

        All tokenizer keyword arguments are forwarded verbatim; the image
        processor adds ``pixel_values`` (and ``pixel_mask``) to the encoding.

        Returns:
            BatchEncoding with both text and image features merged.
        """
        # BUG FIX: the original ``__call__`` also reused one parameter name
        # seventeen times (SyntaxError). Names and defaults restored from the
        # visible default values (True, False, None, 0, ...) which match the
        # upstream BridgeTowerProcessor signature one-to-one.
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        # NOTE(review): do_normalize/do_center_crop values were anonymized in
        # the source; ``True`` matches the upstream processor — confirm.
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)
        return encoding

    def UpperCAmelCase_(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def UpperCAmelCase_(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def UpperCAmelCase_(self):
        """Union of tokenizer and image-processor input names, order-preserving de-dup."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
85
0
"""simple docstring"""


def lowercase__(A) -> int:
    """Return the maximum product over all non-empty contiguous subarrays of *A*.

    Uses the min/max running-product variant of Kadane's algorithm: a negative
    factor turns the smallest running product into the largest, so both are
    tracked simultaneously.

    Args:
        A: list or tuple of integers. An empty sequence returns 0.

    Returns:
        The largest product achievable by any contiguous subarray.

    Raises:
        ValueError: if *A* is not a list/tuple of integers.
    """
    # BUG FIX: the previous body referenced an undefined name (``numbers``)
    # and validated elements with ``isinstance(A, A)``, so every call raised
    # NameError/TypeError. All references now use the parameter consistently.
    if not A:
        return 0
    if not isinstance(A, (list, tuple)) or not all(isinstance(number, int) for number in A):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = A[0]
    min_till_now = A[0]
    max_prod = A[0]
    for number in A[1:]:
        if number < 0:
            # A negative factor swaps which running product is largest/smallest.
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # Track the best product seen over any subarray ending so far.
        max_prod = max(max_prod, max_till_now)
    return max_prod
720
"""simple docstring""" import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= tempfile.mkdtemp() lowercase__ : Optional[Any]= 8 # DPR tok lowercase__ : Tuple= [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] lowercase__ : Any= os.path.join(self.tmpdirname , "dpr_tokenizer" ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) lowercase__ : Any= os.path.join(snake_case__ , DPR_VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) # BART tok lowercase__ : List[Any]= [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] lowercase__ : Tuple= 
dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) lowercase__ : Any= ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] lowercase__ : Tuple= {"unk_token": "<unk>"} lowercase__ : int= os.path.join(self.tmpdirname , "bart_tokenizer" ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) lowercase__ : List[str]= os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES["vocab_file"] ) lowercase__ : str= os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(snake_case__ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(snake_case__ ) ) def UpperCAmelCase_ ( self ): '''simple docstring''' return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) ) def UpperCAmelCase_ ( self ): '''simple docstring''' return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) ) def UpperCAmelCase_ ( self ): '''simple docstring''' return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) ) def UpperCAmelCase_ ( self ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= Dataset.from_dict( { "id": ["0", "1"], "text": ["foo", "bar"], "title": ["Foo", "Bar"], "embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )], } ) dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT ) return dataset def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Union[str, Any]= self.get_dummy_dataset() lowercase__ : Optional[Any]= RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , ) with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset: lowercase__ : Tuple= dataset 
lowercase__ : Optional[int]= RagRetriever( snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) return retriever def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' lowercase__ : Dict= self.get_dummy_dataset() lowercase__ : Tuple= RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , ) if from_disk: lowercase__ : Tuple= os.path.join(self.tmpdirname , "dataset" ) lowercase__ : Optional[Any]= os.path.join(self.tmpdirname , "index.faiss" ) dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname , "index.faiss" ) ) dataset.drop_index("embeddings" ) dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset" ) ) del dataset lowercase__ : List[Any]= RagRetriever( snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) else: lowercase__ : Optional[int]= RagRetriever( snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , snake_case__ ) , ) return retriever def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[str]= Dataset.from_dict( { "id": ["0", "1"], "text": ["foo", "bar"], "title": ["Foo", "Bar"], "embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )], } ) dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT ) lowercase__ : Optional[int]= os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index" ) dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr" ) pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb" ) ) lowercase__ : int= os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl" ) lowercase__ : str= {sample["id"]: 
[sample["text"], sample["title"]] for sample in dataset} pickle.dump(snake_case__ , open(snake_case__ , "wb" ) ) lowercase__ : List[Any]= RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , ) lowercase__ : Optional[Any]= RagRetriever( snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() ) return retriever def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= 1 lowercase__ : Optional[Any]= self.get_dummy_canonical_hf_index_retriever() lowercase__ : Union[str, Any]= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__, lowercase__, lowercase__ : Optional[int]= retriever.retrieve(snake_case__ , n_docs=snake_case__ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(snake_case__ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] ) self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ ) self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[Any]= self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset: lowercase__ : Tuple= self.get_dummy_dataset() retriever.save_pretrained(snake_case__ ) lowercase__ : int= RagRetriever.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) lowercase__ : Any= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , 
dtype=np.floataa ) lowercase__ : Tuple= retriever.retrieve(snake_case__ , n_docs=1 ) self.assertTrue(out is not None ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[Any]= 1 lowercase__ : Any= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ ) lowercase__ : Union[str, Any]= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__, lowercase__, lowercase__ : Any= retriever.retrieve(snake_case__ , n_docs=snake_case__ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(snake_case__ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] ) self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ ) self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[Any]= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(snake_case__ ) lowercase__ : int= RagRetriever.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) lowercase__ : Tuple= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__ : str= retriever.retrieve(snake_case__ , n_docs=1 ) self.assertTrue(out is not None ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Any= 1 lowercase__ : str= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ ) lowercase__ : List[str]= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__, lowercase__, lowercase__ : Optional[int]= retriever.retrieve(snake_case__ , 
n_docs=snake_case__ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(snake_case__ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] ) self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ ) self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Tuple= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(snake_case__ ) lowercase__ : Optional[Any]= RagRetriever.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) lowercase__ : int= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__ : Union[str, Any]= retriever.retrieve(snake_case__ , n_docs=1 ) self.assertTrue(out is not None ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= 1 lowercase__ : int= self.get_dummy_legacy_index_retriever() lowercase__ : Optional[Any]= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__, lowercase__, lowercase__ : Optional[Any]= retriever.retrieve(snake_case__ , n_docs=snake_case__ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(snake_case__ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["text", "title"] ) self.assertEqual(len(doc_dicts[0]["text"] ) , snake_case__ ) self.assertEqual(doc_dicts[0]["text"][0] , "bar" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["text"][0] , "foo" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , 
[[1], [0]] ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[int]= self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(snake_case__ ) lowercase__ : List[Any]= RagRetriever.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) lowercase__ : str= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__ : Tuple= retriever.retrieve(snake_case__ , n_docs=1 ) self.assertTrue(out is not None ) @require_torch @require_tokenizers @require_sentencepiece def UpperCAmelCase_ ( self ): '''simple docstring''' import torch lowercase__ : str= 1 lowercase__ : Union[str, Any]= self.get_dummy_canonical_hf_index_retriever() lowercase__ : str= [[5, 7], [10, 11]] lowercase__ : List[str]= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__ : Dict= retriever(snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ ) lowercase__, lowercase__, lowercase__ : Optional[int]= ( out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(snake_case__ , snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) self.assertIsInstance(snake_case__ , np.ndarray ) lowercase__ : Any= retriever( snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ , return_tensors="pt" , ) lowercase__, lowercase__, lowercase__, lowercase__ : Tuple= ( # noqa: F841 out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], out["doc_ids"], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(snake_case__ , torch.Tensor ) self.assertIsInstance(snake_case__ , torch.Tensor 
) self.assertIsInstance(snake_case__ , torch.Tensor ) @require_torch @require_tokenizers @require_sentencepiece def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[str]= self.get_dpr_ctx_encoder_tokenizer() lowercase__ : Dict= 1 lowercase__ : Any= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ ) retriever.set_ctx_encoder_tokenizer(snake_case__ ) lowercase__ : List[str]= [[5, 7], [10, 11]] lowercase__ : Any= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__ : List[Any]= retriever(snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ ) self.assertEqual( len(snake_case__ ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask") ) , snake_case__ ) # check for doc token related keys in dictionary.
85
0
"""simple docstring"""
# Distributed-datasets smoke test: each node checks that split_dataset_by_node
# hands it the expected number of examples.
#
# NOTE(review): this file has been mechanically anonymized — most locals were
# renamed to `lowercase__` and most argument references to `__UpperCamelCase`,
# so several names used below (shards, parser, args, streaming, NUM_SHARDS,
# NUM_ITEMS_PER_SHARD, full_size, rank, world_size, dataloader, local_size,
# expected_local_size, main, SCREAMING_SNAKE_CASE__, Optional, Union) are
# undefined as written. Documented as-is; it does not run in this form.
import os
from argparse import ArgumentParser
from typing import List

import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node

# Presumably NUM_ITEMS_PER_SHARD = 4 and NUM_SHARDS = 3 before anonymization;
# as written the second assignment shadows the first — TODO confirm.
a : Optional[int] = 4
a : Union[str, Any] = 3


class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
    """simple docstring"""

    # Marker exception raised when a node receives the wrong number of examples.
    pass


def lowercase__(A ) ->Any:
    """simple docstring"""
    # Generator over all shards: yields one record per (item index, shard name).
    for shard in shards:
        for i in range(__UpperCamelCase ):
            yield {"i": i, "shard": shard}


def lowercase__() ->List[str]:
    """simple docstring"""
    # Rank / world size are injected by the distributed launcher via env vars.
    lowercase__ : Any= int(os.environ["RANK"] )
    lowercase__ : int= int(os.environ["WORLD_SIZE"] )
    lowercase__ : Union[str, Any]= ArgumentParser()
    parser.add_argument("--streaming" , type=__UpperCamelCase )
    parser.add_argument("--local_rank" , type=__UpperCamelCase )
    parser.add_argument("--num_workers" , type=__UpperCamelCase , default=0 )
    lowercase__ : Tuple= parser.parse_args()
    lowercase__ : int= args.streaming
    lowercase__ : Any= args.num_workers
    # Build an IterableDataset from the shard generator; materialize it to a
    # regular Dataset when not running in streaming mode.
    lowercase__ : List[Any]= {"""shards""": [f'''shard_{shard_idx}''' for shard_idx in range(__UpperCamelCase )]}
    lowercase__ : Any= IterableDataset.from_generator(__UpperCamelCase , gen_kwargs=__UpperCamelCase )
    if not streaming:
        lowercase__ : Any= Dataset.from_list(list(__UpperCamelCase ) )
    # Keep only this node's slice, then iterate it through a DataLoader.
    lowercase__ : List[Any]= split_dataset_by_node(__UpperCamelCase , rank=__UpperCamelCase , world_size=__UpperCamelCase )
    lowercase__ : List[str]= torch.utils.data.DataLoader(__UpperCamelCase , num_workers=__UpperCamelCase )
    # Expected per-node size: floor division, plus one extra item for the
    # first (full_size % world_size) ranks.
    lowercase__ : Dict= NUM_SHARDS * NUM_ITEMS_PER_SHARD
    lowercase__ : Optional[int]= full_size // world_size
    expected_local_size += int(rank < (full_size % world_size) )
    lowercase__ : Tuple= sum(1 for _ in dataloader )
    if local_size != expected_local_size:
        raise FailedTestError(f'''local_size {local_size} != expected_local_size {expected_local_size}''' )


if __name__ == "__main__":
    main()
721
"""simple docstring""" from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = ["image_processor", "tokenizer"] __lowerCamelCase = "AutoImageProcessor" __lowerCamelCase = "AutoTokenizer" def __init__( self , snake_case__ , snake_case__ ): '''simple docstring''' super().__init__(snake_case__ , snake_case__ ) lowercase__ : List[Any]= self.image_processor def __call__( self , snake_case__=None , snake_case__=None , snake_case__=None , **snake_case__ ): '''simple docstring''' if text is None and images is None: raise ValueError("You have to specify either text or images. Both cannot be none." ) if text is not None: lowercase__ : Tuple= self.tokenizer(snake_case__ , return_tensors=snake_case__ , **snake_case__ ) if images is not None: lowercase__ : str= self.image_processor(snake_case__ , return_tensors=snake_case__ , **snake_case__ ) if text is not None and images is not None: lowercase__ : Any= image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**snake_case__ ) , tensor_type=snake_case__ ) def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ ) def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ): '''simple docstring''' return self.tokenizer.decode(*snake_case__ , **snake_case__ ) @property def UpperCAmelCase_ ( self ): '''simple docstring''' return ["input_ids", "attention_mask", "pixel_values"]
85
0
"""simple docstring""" import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler a : str = 16 a : Union[str, Any] = 32 def lowercase__(A , A = 16 , A = "bert-base-cased" ) ->Tuple: """simple docstring""" lowercase__ : Optional[Any]= AutoTokenizer.from_pretrained(snake_case__ ) lowercase__ : Optional[int]= load_dataset("glue" , "mrpc" ) def tokenize_function(A ): # max_length=None => use the model max length (it's actually the default) lowercase__ : Tuple= tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=snake_case__ , max_length=snake_case__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowercase__ : List[str]= datasets.map( snake_case__ , batched=snake_case__ , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=snake_case__ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowercase__ : Any= tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(A ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(snake_case__ , padding="max_length" , max_length=128 , return_tensors="pt" ) return tokenizer.pad(snake_case__ , padding="longest" , return_tensors="pt" ) # Instantiate dataloaders. 
lowercase__ : str= DataLoader( tokenized_datasets["train"] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ ) lowercase__ : Dict= DataLoader( tokenized_datasets["validation"] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ ) return train_dataloader, eval_dataloader def lowercase__(A , A ) ->Optional[Any]: """simple docstring""" lowercase__ : int= Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowercase__ : Dict= config["lr"] lowercase__ : Optional[int]= int(config["num_epochs"] ) lowercase__ : Optional[Any]= int(config["seed"] ) lowercase__ : int= int(config["batch_size"] ) lowercase__ : Union[str, Any]= args.model_name_or_path set_seed(snake_case__ ) lowercase__, lowercase__ : int= get_dataloaders(snake_case__ , snake_case__ , snake_case__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowercase__ : int= AutoModelForSequenceClassification.from_pretrained(snake_case__ , return_dict=snake_case__ ) # Instantiate optimizer lowercase__ : int= ( AdamW if accelerator.state.deepspeed_plugin is None or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) lowercase__ : Optional[int]= optimizer_cls(params=model.parameters() , lr=snake_case__ ) if accelerator.state.deepspeed_plugin is not None: lowercase__ : Optional[Any]= accelerator.state.deepspeed_plugin.deepspeed_config[ "gradient_accumulation_steps" ] else: lowercase__ : Tuple= 1 lowercase__ : List[str]= (len(snake_case__ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): lowercase__ : List[str]= get_linear_schedule_with_warmup( optimizer=snake_case__ , num_warmup_steps=0 , num_training_steps=snake_case__ , ) else: lowercase__ : str= DummyScheduler(snake_case__ , 
total_num_steps=snake_case__ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ : Dict= accelerator.prepare( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # We need to keep track of how many total steps we have iterated over lowercase__ : str= 0 # We also need to keep track of the stating epoch so files are named properly lowercase__ : Any= 0 # Now we train the model lowercase__ : Union[str, Any]= evaluate.load("glue" , "mrpc" ) lowercase__ : str= 0 lowercase__ : int= {} for epoch in range(snake_case__ , snake_case__ ): model.train() for step, batch in enumerate(snake_case__ ): lowercase__ : Dict= model(**snake_case__ ) lowercase__ : List[str]= outputs.loss lowercase__ : Optional[Any]= loss / gradient_accumulation_steps accelerator.backward(snake_case__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() lowercase__ : str= 0 for step, batch in enumerate(snake_case__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): lowercase__ : str= model(**snake_case__ ) lowercase__ : Optional[Any]= outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times lowercase__, lowercase__ : Optional[Any]= accelerator.gather( (predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(snake_case__ ) - 1: lowercase__ : List[Any]= predictions[: len(eval_dataloader.dataset ) - samples_seen] lowercase__ : Optional[int]= references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=snake_case__ , references=snake_case__ , ) lowercase__ : Optional[int]= metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''' , snake_case__ ) lowercase__ : Tuple= eval_metric["accuracy"] if best_performance < eval_metric["accuracy"]: lowercase__ : Dict= eval_metric["accuracy"] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}''' accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , "all_results.json" ) , "w" ) as f: json.dump(snake_case__ , snake_case__ ) def lowercase__() ->Any: """simple docstring""" lowercase__ : Tuple= argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." ) parser.add_argument( "--model_name_or_path" , type=snake_case__ , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=snake_case__ , ) parser.add_argument( "--output_dir" , type=snake_case__ , default="." , help="Optional save directory where all checkpoint folders will be stored. 
Default is the current working directory." , ) parser.add_argument( "--performance_lower_bound" , type=snake_case__ , default=snake_case__ , help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value." , ) parser.add_argument( "--num_epochs" , type=snake_case__ , default=3 , help="Number of train epochs." , ) lowercase__ : Optional[int]= parser.parse_args() lowercase__ : Tuple= {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16} training_function(snake_case__ , snake_case__ ) if __name__ == "__main__": main()
700
"""simple docstring""" a : List[Any] = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/""" def lowercase__(A ) ->bytes: """simple docstring""" if not isinstance(A , A ): lowercase__ : Union[str, Any]= f'''a bytes-like object is required, not \'{data.__class__.__name__}\'''' raise TypeError(A ) lowercase__ : str= "".join(bin(A )[2:].zfill(8 ) for byte in data ) lowercase__ : Tuple= len(A ) % 6 != 0 if padding_needed: # The padding that will be added later lowercase__ : Union[str, Any]= b"=" * ((6 - len(A ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(A ) % 6) else: lowercase__ : str= b"" # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(A ) , 6 ) ).encode() + padding ) def lowercase__(A ) ->bytes: """simple docstring""" if not isinstance(A , A ) and not isinstance(A , A ): lowercase__ : str= ( "argument should be a bytes-like object or ASCII string, " f'''not \'{encoded_data.__class__.__name__}\'''' ) raise TypeError(A ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(A , A ): try: lowercase__ : Optional[Any]= encoded_data.decode("utf-8" ) except UnicodeDecodeError: raise ValueError("base64 encoded data should only contain ASCII characters" ) lowercase__ : List[Any]= encoded_data.count("=" ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." 
# Check the padding assert len(A ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one lowercase__ : str= encoded_data[:-padding] lowercase__ : Tuple= "".join( bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: lowercase__ : Tuple= "".join( bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data ) lowercase__ : Any= [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(A ) , 8 ) ] return bytes(A ) if __name__ == "__main__": import doctest doctest.testmod()
85
0
"""simple docstring""" import os import time import pytest from datasets.utils.filelock import FileLock, Timeout def lowercase__(A ) ->Tuple: """simple docstring""" lowercase__ : Any= FileLock(str(tmpdir / "foo.lock" ) ) lowercase__ : Union[str, Any]= FileLock(str(tmpdir / "foo.lock" ) ) lowercase__ : Any= 0.01 with locka.acquire(): with pytest.raises(__SCREAMING_SNAKE_CASE ): lowercase__ : str= time.time() locka.acquire(__SCREAMING_SNAKE_CASE ) assert time.time() - _start > timeout def lowercase__(A ) ->Any: """simple docstring""" lowercase__ : str= "a" * 1_000 + ".lock" lowercase__ : Any= FileLock(str(tmpdir / filename ) ) assert locka._lock_file.endswith(".lock" ) assert not locka._lock_file.endswith(__SCREAMING_SNAKE_CASE ) assert len(os.path.basename(locka._lock_file ) ) <= 255 lowercase__ : Dict= FileLock(tmpdir / filename ) with locka.acquire(): with pytest.raises(__SCREAMING_SNAKE_CASE ): locka.acquire(0 )
701
"""simple docstring""" from __future__ import annotations def lowercase__(A ) ->list[int]: # This function is recursive """simple docstring""" lowercase__ : int= len(A ) # If the array contains only one element, we return it (it's the stop condition of # recursion) if array_length <= 1: return array # Else lowercase__ : str= array[0] lowercase__ : Optional[Any]= False lowercase__ : Any= 1 lowercase__ : list[int]= [] while not is_found and i < array_length: if array[i] < pivot: lowercase__ : Union[str, Any]= True lowercase__ : List[str]= [element for element in array[i:] if element >= array[i]] lowercase__ : Union[str, Any]= longest_subsequence(A ) if len(A ) > len(A ): lowercase__ : List[str]= temp_array else: i += 1 lowercase__ : List[str]= [element for element in array[1:] if element >= pivot] lowercase__ : List[str]= [pivot, *longest_subsequence(A )] if len(A ) > len(A ): return temp_array else: return longest_subseq if __name__ == "__main__": import doctest doctest.testmod()
85
0
"""simple docstring""" import argparse import torch from safetensors.torch import load_file from diffusers import StableDiffusionPipeline def lowercase__(A , A , A , A , A ) ->List[Any]: """simple docstring""" lowercase__ : str= StableDiffusionPipeline.from_pretrained(A , torch_dtype=torch.floataa ) # load LoRA weight from .safetensors lowercase__ : Union[str, Any]= load_file(A ) lowercase__ : int= [] # directly update weight in diffusers model for key in state_dict: # it is suggested to print out the key, it usually will be something like below # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight" # as we have set the alpha beforehand, so just skip if ".alpha" in key or key in visited: continue if "text" in key: lowercase__ : Union[str, Any]= key.split("." )[0].split(LORA_PREFIX_TEXT_ENCODER + "_" )[-1].split("_" ) lowercase__ : Optional[int]= pipeline.text_encoder else: lowercase__ : Tuple= key.split("." )[0].split(LORA_PREFIX_UNET + "_" )[-1].split("_" ) lowercase__ : List[str]= pipeline.unet # find the target layer lowercase__ : Optional[Any]= layer_infos.pop(0 ) while len(A ) > -1: try: lowercase__ : List[str]= curr_layer.__getattr__(A ) if len(A ) > 0: lowercase__ : Union[str, Any]= layer_infos.pop(0 ) elif len(A ) == 0: break except Exception: if len(A ) > 0: temp_name += "_" + layer_infos.pop(0 ) else: lowercase__ : Tuple= layer_infos.pop(0 ) lowercase__ : Any= [] if "lora_down" in key: pair_keys.append(key.replace("lora_down" , "lora_up" ) ) pair_keys.append(A ) else: pair_keys.append(A ) pair_keys.append(key.replace("lora_up" , "lora_down" ) ) # update weight if len(state_dict[pair_keys[0]].shape ) == 4: lowercase__ : Dict= state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) lowercase__ : List[Any]= state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(A , A ).unsqueeze(2 ).unsqueeze(3 ) else: lowercase__ : Optional[Any]= state_dict[pair_keys[0]].to(torch.floataa 
) lowercase__ : Optional[int]= state_dict[pair_keys[1]].to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(A , A ) # update visited list for item in pair_keys: visited.append(A ) return pipeline if __name__ == "__main__": a : Tuple = argparse.ArgumentParser() parser.add_argument( """--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format.""" ) parser.add_argument( """--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert.""" ) parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument( """--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors""" ) parser.add_argument( """--lora_prefix_text_encoder""", default="""lora_te""", type=str, help="""The prefix of text encoder weight in safetensors""", ) parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""") parser.add_argument( """--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not.""" ) parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""") a : Optional[Any] = parser.parse_args() a : int = args.base_model_path a : int = args.checkpoint_path a : List[Any] = args.dump_path a : Tuple = args.lora_prefix_unet a : List[Any] = args.lora_prefix_text_encoder a : List[Any] = args.alpha a : Tuple = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha) a : str = pipe.to(args.device) pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
702
"""simple docstring""" import argparse from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline if __name__ == "__main__": a : int = argparse.ArgumentParser() parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument( """--txt2img_unclip""", default="""kakaobrain/karlo-v1-alpha""", type=str, required=False, help="""The pretrained txt2img unclip.""", ) a : List[str] = parser.parse_args() a : List[str] = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip) a : Optional[Any] = CLIPImageProcessor() a : List[str] = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""") a : Tuple = UnCLIPImageVariationPipeline( decoder=txtaimg.decoder, text_encoder=txtaimg.text_encoder, tokenizer=txtaimg.tokenizer, text_proj=txtaimg.text_proj, feature_extractor=feature_extractor, image_encoder=image_encoder, super_res_first=txtaimg.super_res_first, super_res_last=txtaimg.super_res_last, decoder_scheduler=txtaimg.decoder_scheduler, super_res_scheduler=txtaimg.super_res_scheduler, ) imgaimg.save_pretrained(args.dump_path)
85
0
"""simple docstring""" import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def lowercase__(A ) ->int: """simple docstring""" lowercase__ : Any= [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "_float_tensor", "decoder.output_projection.weight", ] for k in ignore_keys: state_dict.pop(snake_case_ , snake_case_ ) def lowercase__(A ) ->Optional[int]: """simple docstring""" lowercase__ : Optional[Any]= emb.weight.shape lowercase__ : int= nn.Linear(snake_case_ , snake_case_ , bias=snake_case_ ) lowercase__ : List[Any]= emb.weight.data return lin_layer def lowercase__(A , A="facebook/mbart-large-en-ro" , A=False , A=False ) ->Any: """simple docstring""" lowercase__ : List[str]= torch.load(snake_case_ , map_location="cpu" )["model"] remove_ignore_keys_(snake_case_ ) lowercase__ : Optional[Any]= state_dict["encoder.embed_tokens.weight"].shape[0] lowercase__ : Optional[Any]= MBartConfig.from_pretrained(snake_case_ , vocab_size=snake_case_ ) if mbart_aa and finetuned: lowercase__ : Any= "relu" lowercase__ : Optional[Any]= state_dict["decoder.embed_tokens.weight"] lowercase__ : Dict= MBartForConditionalGeneration(snake_case_ ) model.model.load_state_dict(snake_case_ ) if finetuned: lowercase__ : str= make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": a : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( """fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem.""" ) parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument( """--hf_config""", default="""facebook/mbart-large-cc25""", type=str, help="""Which huggingface architecture to use: mbart-large""", ) parser.add_argument("""--mbart_50""", action="""store_true""", help="""whether the model is mMART-50 checkpoint""") 
parser.add_argument("""--finetuned""", action="""store_true""", help="""whether the model is a fine-tuned checkpoint""") a : Optional[int] = parser.parse_args() a : Union[str, Any] = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
703
"""simple docstring""" import argparse import os from . import ( ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BART_PRETRAINED_MODEL_ARCHIVE_LIST, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, BartConfig, BertConfig, CamembertConfig, CTRLConfig, DistilBertConfig, DPRConfig, ElectraConfig, FlaubertConfig, GPTaConfig, LayoutLMConfig, LxmertConfig, OpenAIGPTConfig, RobertaConfig, TaConfig, TFAlbertForPreTraining, TFBartForConditionalGeneration, TFBartForSequenceClassification, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFCamembertForMaskedLM, TFCTRLLMHeadModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, TFElectraForPreTraining, TFFlaubertWithLMHeadModel, TFGPTaLMHeadModel, TFLayoutLMForMaskedLM, TFLxmertForPreTraining, TFLxmertVisualFeatureEncoder, TFOpenAIGPTLMHeadModel, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForSequenceClassification, TFTaForConditionalGeneration, TFTransfoXLLMHeadModel, TFWavaVecaModel, TFXLMRobertaForMaskedLM, TFXLMWithLMHeadModel, TFXLNetLMHeadModel, TransfoXLConfig, WavaVecaConfig, WavaVecaModel, XLMConfig, XLMRobertaConfig, XLNetConfig, is_torch_available, 
load_pytorch_checkpoint_in_tfa_model, ) from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging if is_torch_available(): import numpy as np import torch from . import ( AlbertForPreTraining, BartForConditionalGeneration, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, CamembertForMaskedLM, CTRLLMHeadModel, DistilBertForMaskedLM, DistilBertForQuestionAnswering, DPRContextEncoder, DPRQuestionEncoder, DPRReader, ElectraForPreTraining, FlaubertWithLMHeadModel, GPTaLMHeadModel, LayoutLMForMaskedLM, LxmertForPreTraining, LxmertVisualFeatureEncoder, OpenAIGPTLMHeadModel, RobertaForMaskedLM, RobertaForSequenceClassification, TaForConditionalGeneration, TransfoXLLMHeadModel, XLMRobertaForMaskedLM, XLMWithLMHeadModel, XLNetLMHeadModel, ) logging.set_verbosity_info() a : Optional[Any] = { """bart""": ( BartConfig, TFBartForConditionalGeneration, TFBartForSequenceClassification, BartForConditionalGeneration, BART_PRETRAINED_MODEL_ARCHIVE_LIST, ), """bert""": ( BertConfig, TFBertForPreTraining, BertForPreTraining, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-large-uncased-whole-word-masking-finetuned-squad""": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-large-cased-whole-word-masking-finetuned-squad""": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-base-cased-finetuned-mrpc""": ( BertConfig, TFBertForSequenceClassification, BertForSequenceClassification, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """dpr""": ( DPRConfig, TFDPRQuestionEncoder, TFDPRContextEncoder, TFDPRReader, DPRQuestionEncoder, DPRContextEncoder, DPRReader, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ), """gpt2""": ( GPTaConfig, TFGPTaLMHeadModel, GPTaLMHeadModel, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlnet""": ( XLNetConfig, 
TFXLNetLMHeadModel, XLNetLMHeadModel, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlm""": ( XLMConfig, TFXLMWithLMHeadModel, XLMWithLMHeadModel, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlm-roberta""": ( XLMRobertaConfig, TFXLMRobertaForMaskedLM, XLMRobertaForMaskedLM, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """transfo-xl""": ( TransfoXLConfig, TFTransfoXLLMHeadModel, TransfoXLLMHeadModel, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """openai-gpt""": ( OpenAIGPTConfig, TFOpenAIGPTLMHeadModel, OpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """roberta""": ( RobertaConfig, TFRobertaForCausalLM, TFRobertaForMaskedLM, RobertaForMaskedLM, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """layoutlm""": ( LayoutLMConfig, TFLayoutLMForMaskedLM, LayoutLMForMaskedLM, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, ), """roberta-large-mnli""": ( RobertaConfig, TFRobertaForSequenceClassification, RobertaForSequenceClassification, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """camembert""": ( CamembertConfig, TFCamembertForMaskedLM, CamembertForMaskedLM, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """flaubert""": ( FlaubertConfig, TFFlaubertWithLMHeadModel, FlaubertWithLMHeadModel, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """distilbert""": ( DistilBertConfig, TFDistilBertForMaskedLM, DistilBertForMaskedLM, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """distilbert-base-distilled-squad""": ( DistilBertConfig, TFDistilBertForQuestionAnswering, DistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """lxmert""": ( LxmertConfig, TFLxmertForPreTraining, LxmertForPreTraining, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """lxmert-visual-feature-encoder""": ( LxmertConfig, TFLxmertVisualFeatureEncoder, LxmertVisualFeatureEncoder, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """ctrl""": ( CTRLConfig, TFCTRLLMHeadModel, CTRLLMHeadModel, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """albert""": ( AlbertConfig, TFAlbertForPreTraining, AlbertForPreTraining, 
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """t5""": ( TaConfig, TFTaForConditionalGeneration, TaForConditionalGeneration, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """electra""": ( ElectraConfig, TFElectraForPreTraining, ElectraForPreTraining, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """wav2vec2""": ( WavaVecaConfig, TFWavaVecaModel, WavaVecaModel, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), } def lowercase__(A , A , A , A , A=False , A=True ) ->Union[str, Any]: """simple docstring""" if model_type not in MODEL_CLASSES: raise ValueError(f'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' ) lowercase__, lowercase__, lowercase__, lowercase__ : List[Any]= MODEL_CLASSES[model_type] # Initialise TF model if config_file in aws_config_map: lowercase__ : List[str]= cached_file(A , A , force_download=not use_cached_models ) lowercase__ : List[Any]= config_class.from_json_file(A ) lowercase__ : Any= True lowercase__ : List[str]= True print(f'''Building TensorFlow model from configuration: {config}''' ) lowercase__ : Optional[int]= model_class(A ) # Load weights from tf checkpoint if pytorch_checkpoint_path in aws_config_map.keys(): lowercase__ : List[str]= cached_file( A , A , force_download=not use_cached_models ) # Load PyTorch checkpoint in tf2 model: lowercase__ : Union[str, Any]= load_pytorch_checkpoint_in_tfa_model(A , A ) if compare_with_pt_model: lowercase__ : Any= tf_model(tf_model.dummy_inputs , training=A ) # build the network lowercase__ : Optional[Any]= torch.load(A , map_location="cpu" ) lowercase__ : Union[str, Any]= pt_model_class.from_pretrained( pretrained_model_name_or_path=A , config=A , state_dict=A ) with torch.no_grad(): lowercase__ : str= pt_model(**pt_model.dummy_inputs ) lowercase__ : Tuple= pto[0].numpy() lowercase__ : List[Any]= tfo[0].numpy() lowercase__ : Any= np.amax(np.abs(np_pt - np_tf ) ) print(f'''Max absolute difference between models outputs {diff}''' ) assert diff <= 2e-2, f'''Error, model absolute difference is 
>2e-2: {diff}''' # Save pytorch-model print(f'''Save TensorFlow model to {tf_dump_path}''' ) tf_model.save_weights(A , save_format="h5" ) def lowercase__(A , A , A=None , A=None , A=False , A=False , A=False , A=False , ) ->List[Any]: """simple docstring""" if args_model_type is None: lowercase__ : Tuple= list(MODEL_CLASSES.keys() ) else: lowercase__ : Optional[int]= [args_model_type] for j, model_type in enumerate(A , start=1 ): print("=" * 100 ) print(f''' Converting model type {j}/{len(A )}: {model_type}''' ) print("=" * 100 ) if model_type not in MODEL_CLASSES: raise ValueError(f'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' ) lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ : Optional[int]= MODEL_CLASSES[model_type] if model_shortcut_names_or_path is None: lowercase__ : int= list(aws_model_maps.keys() ) if config_shortcut_names_or_path is None: lowercase__ : Any= model_shortcut_names_or_path for i, (model_shortcut_name, config_shortcut_name) in enumerate( zip(A , A ) , start=1 ): print("-" * 100 ) if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name: if not only_convert_finetuned_models: print(f''' Skipping finetuned checkpoint {model_shortcut_name}''' ) continue lowercase__ : Any= model_shortcut_name elif only_convert_finetuned_models: print(f''' Skipping not finetuned checkpoint {model_shortcut_name}''' ) continue print( f''' Converting checkpoint {i}/{len(A )}: {model_shortcut_name} - model_type {model_type}''' ) print("-" * 100 ) if config_shortcut_name in aws_config_map: lowercase__ : List[str]= cached_file(A , A , force_download=not use_cached_models ) else: lowercase__ : Union[str, Any]= config_shortcut_name if model_shortcut_name in aws_model_maps: lowercase__ : str= cached_file(A , A , force_download=not use_cached_models ) else: lowercase__ : Any= model_shortcut_name if os.path.isfile(A ): lowercase__ : Dict= "converted_model" 
convert_pt_checkpoint_to_tf( model_type=A , pytorch_checkpoint_path=A , config_file=A , tf_dump_path=os.path.join(A , model_shortcut_name + "-tf_model.h5" ) , compare_with_pt_model=A , ) if remove_cached_files: os.remove(A ) os.remove(A ) if __name__ == "__main__": a : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file.""" ) parser.add_argument( """--model_type""", default=None, type=str, help=( F"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """ """convert all the models from AWS.""" ), ) parser.add_argument( """--pytorch_checkpoint_path""", default=None, type=str, help=( """Path to the PyTorch checkpoint path or shortcut name to download from AWS. """ """If not given, will download and convert all the checkpoints from AWS.""" ), ) parser.add_argument( """--config_file""", default=None, type=str, help=( """The config json file corresponding to the pre-trained model. \n""" """This specifies the model architecture. 
If not given and """ """--pytorch_checkpoint_path is not given or is a shortcut name """ """use the configuration associated to the shortcut name on the AWS""" ), ) parser.add_argument( """--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions.""" ) parser.add_argument( """--use_cached_models""", action="""store_true""", help="""Use cached models if possible instead of updating to latest checkpoint versions.""", ) parser.add_argument( """--remove_cached_files""", action="""store_true""", help="""Remove pytorch models after conversion (save memory when converting in batches).""", ) parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""") a : List[str] = parser.parse_args() # if args.pytorch_checkpoint_path is not None: # convert_pt_checkpoint_to_tf(args.model_type.lower(), # args.pytorch_checkpoint_path, # args.config_file if args.config_file is not None else args.pytorch_checkpoint_path, # args.tf_dump_path, # compare_with_pt_model=args.compare_with_pt_model, # use_cached_models=args.use_cached_models) # else: convert_all_pt_checkpoints_to_tf( args.model_type.lower() if args.model_type is not None else None, args.tf_dump_path, model_shortcut_names_or_path=[args.pytorch_checkpoint_path] if args.pytorch_checkpoint_path is not None else None, config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None, compare_with_pt_model=args.compare_with_pt_model, use_cached_models=args.use_cached_models, remove_cached_files=args.remove_cached_files, only_convert_finetuned_models=args.only_convert_finetuned_models, )
85
0
"""simple docstring""" import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class __UpperCAmelCase: def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=False , snake_case__=True , snake_case__=99 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=16 , snake_case__=2 , snake_case__=0.02 , snake_case__=3 , snake_case__=4 , snake_case__=None , ): '''simple docstring''' lowercase__ : List[str]= parent lowercase__ : Optional[Any]= batch_size lowercase__ : Optional[Any]= seq_length lowercase__ : Optional[Any]= is_training lowercase__ : Optional[Any]= use_input_mask lowercase__ : Optional[int]= use_token_type_ids lowercase__ : Union[str, Any]= use_labels lowercase__ : Tuple= vocab_size lowercase__ : Any= hidden_size lowercase__ : Optional[int]= num_hidden_layers lowercase__ : Any= num_attention_heads lowercase__ : Tuple= intermediate_size lowercase__ : Union[str, Any]= hidden_act lowercase__ : Tuple= hidden_dropout_prob lowercase__ : List[Any]= attention_probs_dropout_prob lowercase__ : List[Any]= max_position_embeddings lowercase__ : str= type_vocab_size lowercase__ : str= type_sequence_label_size lowercase__ : str= initializer_range lowercase__ : Dict= num_labels lowercase__ : Tuple= num_choices lowercase__ : List[Any]= scope def UpperCAmelCase_ ( self ): 
'''simple docstring''' lowercase__ : List[Any]= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__ : int= None if self.use_input_mask: lowercase__ : Tuple= random_attention_mask([self.batch_size, self.seq_length] ) lowercase__ : Optional[Any]= None if self.use_token_type_ids: lowercase__ : List[str]= ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase__ : Optional[int]= None lowercase__ : Tuple= None lowercase__ : str= None if self.use_labels: lowercase__ : Any= ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ : int= ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase__ : Union[str, Any]= ids_tensor([self.batch_size] , self.num_choices ) lowercase__ : List[str]= self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase_ ( self ): '''simple docstring''' return LlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' lowercase__ : List[Any]= LlamaModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : Dict= model(snake_case__ , attention_mask=snake_case__ ) lowercase__ : Union[str, Any]= model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ ( self , snake_case__ , 
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : Any= True lowercase__ : Any= LlamaModel(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : Tuple= model( snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , ) lowercase__ : int= model( snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , ) lowercase__ : int= model(snake_case__ , attention_mask=snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : int= LlamaForCausalLM(config=snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : str= model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : List[Any]= True lowercase__ : List[Any]= True lowercase__ : List[str]= LlamaForCausalLM(config=snake_case__ ) model.to(snake_case__ ) model.eval() # first forward pass lowercase__ : int= model( snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , use_cache=snake_case__ , ) lowercase__ : List[str]= outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids lowercase__ : List[str]= ids_tensor((self.batch_size, 3) , config.vocab_size ) lowercase__ : Union[str, Any]= ids_tensor((self.batch_size, 3) , vocab_size=2 ) # 
append to next input_ids and lowercase__ : Union[str, Any]= torch.cat([input_ids, next_tokens] , dim=-1 ) lowercase__ : Optional[Any]= torch.cat([input_mask, next_mask] , dim=-1 ) lowercase__ : Any= model( snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , output_hidden_states=snake_case__ , )["hidden_states"][0] lowercase__ : Optional[int]= model( snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , past_key_values=snake_case__ , output_hidden_states=snake_case__ , )["hidden_states"][0] # select random slice lowercase__ : List[Any]= ids_tensor((1,) , output_from_past.shape[-1] ).item() lowercase__ : Optional[Any]= output_from_no_past[:, -3:, random_slice_idx].detach() lowercase__ : Any= output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1e-3 ) ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : int= self.prepare_config_and_inputs() ( lowercase__ ) : Tuple= config_and_inputs lowercase__ : Tuple= {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class __UpperCAmelCase( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ): __lowerCamelCase = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () __lowerCamelCase = (LlamaForCausalLM,) if is_torch_available() else () __lowerCamelCase = ( { "feature-extraction": LlamaModel, "text-classification": LlamaForSequenceClassification, "text-generation": LlamaForCausalLM, "zero-shot": LlamaForSequenceClassification, } if is_torch_available() else {} ) __lowerCamelCase = False __lowerCamelCase = False def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : int= LlamaModelTester(self ) 
lowercase__ : str= ConfigTester(self , config_class=snake_case__ , hidden_size=37 ) def UpperCAmelCase_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : str= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Union[str, Any]= self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowercase__ : Optional[int]= type self.model_tester.create_and_check_model(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : str= self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : Tuple= 3 lowercase__ : List[str]= input_dict["input_ids"] lowercase__ : int= input_ids.ne(1 ).to(snake_case__ ) lowercase__ : Dict= ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) lowercase__ : List[Any]= LlamaForSequenceClassification(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : Tuple= model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Tuple= self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : Dict= 3 lowercase__ : Optional[int]= "single_label_classification" lowercase__ : List[Any]= input_dict["input_ids"] lowercase__ : Optional[int]= input_ids.ne(1 ).to(snake_case__ ) lowercase__ : Dict= ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) lowercase__ : Union[str, Any]= LlamaForSequenceClassification(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : List[Any]= model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ ) self.assertEqual(result.logits.shape , 
(self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : str= self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : Union[str, Any]= 3 lowercase__ : Dict= "multi_label_classification" lowercase__ : List[str]= input_dict["input_ids"] lowercase__ : Optional[Any]= input_ids.ne(1 ).to(snake_case__ ) lowercase__ : Optional[int]= ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) lowercase__ : Tuple= LlamaForSequenceClassification(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : List[str]= model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip("LLaMA buffers include complex numbers, which breaks this test" ) def UpperCAmelCase_ ( self ): '''simple docstring''' pass @parameterized.expand([("linear",), ("dynamic",)] ) def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' lowercase__ : Any= self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : Optional[int]= ids_tensor([1, 10] , config.vocab_size ) lowercase__ : List[str]= ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights lowercase__ : Optional[Any]= LlamaModel(snake_case__ ) original_model.to(snake_case__ ) original_model.eval() lowercase__ : Optional[Any]= original_model(snake_case__ ).last_hidden_state lowercase__ : Union[str, Any]= original_model(snake_case__ ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights lowercase__ : str= {"type": scaling_type, "factor": 10.0} lowercase__ : int= LlamaModel(snake_case__ ) scaled_model.to(snake_case__ ) scaled_model.eval() lowercase__ : Tuple= scaled_model(snake_case__ 
).last_hidden_state lowercase__ : Any= scaled_model(snake_case__ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1e-5 ) ) else: self.assertFalse(torch.allclose(snake_case__ , snake_case__ , atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(snake_case__ , snake_case__ , atol=1e-5 ) ) @require_torch class __UpperCAmelCase( unittest.TestCase ): @unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" ) @slow def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[Any]= [1, 306, 4658, 278, 6593, 310, 2834, 338] lowercase__ : List[Any]= LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf" , device_map="auto" ) lowercase__ : Optional[int]= model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 lowercase__ : str= torch.tensor([[-6.65_50, -4.12_27, -4.98_59, -3.24_06, 0.82_62, -3.00_33, 1.29_64, -3.36_99]] ) torch.testing.assert_close(out.mean(-1 ) , snake_case__ , atol=1e-2 , rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off lowercase__ : Optional[Any]= torch.tensor([-12.82_81, -7.44_53, -0.46_39, -8.06_25, -7.25_00, -8.00_00, -6.48_83, -7.76_95, -7.84_38, -7.03_12, -6.21_88, -7.13_28, -1.84_96, 1.99_61, -8.62_50, -6.72_27, -12.82_81, -6.94_92, -7.07_42, -7.78_52, -7.58_20, -7.90_62, -6.93_75, -7.98_05, -8.34_38, -8.15_62, -8.04_69, -7.62_50, -7.74_22, -7.33_98,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , snake_case__ , atol=1e-5 , rtol=1e-5 ) @unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" 
) @slow def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Union[str, Any]= [1, 306, 4658, 278, 6593, 310, 2834, 338] lowercase__ : int= LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf" , device_map="auto" ) lowercase__ : List[Any]= model(torch.tensor(snake_case__ ) ) # Expected mean on dim = -1 lowercase__ : Any= torch.tensor([[-2.06_22, -1.27_94, -1.16_38, -0.97_88, -1.46_03, -1.02_38, -1.78_93, -1.44_11]] ) torch.testing.assert_close(out.mean(-1 ) , snake_case__ , atol=1e-2 , rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off lowercase__ : List[Any]= torch.tensor([-8.14_06, -8.05_47, 2.74_61, -1.23_44, -0.14_48, -1.82_62, -1.00_20, -1.81_54, -1.68_95, -1.85_16, -2.35_74, -0.92_77, 3.75_98, 6.57_42, -1.29_98, -0.11_77, -8.14_06, -2.96_88, -2.91_99, -3.16_99, -3.52_54, -2.35_55, -2.79_88, -3.41_41, -2.82_62, -4.51_95, -3.33_79, -3.31_64, -2.78_32, -3.02_73] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , snake_case__ , atol=1e-5 , rtol=1e-5 ) @unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" 
) @slow def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Union[str, Any]= [1, 306, 4658, 278, 6593, 310, 2834, 338] lowercase__ : List[str]= LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf" , device_map="auto" ) lowercase__ : List[str]= model(torch.tensor(snake_case__ ) ) # Expected mean on dim = -1 lowercase__ : Optional[int]= torch.tensor([[-0.85_62, -1.85_20, -0.75_51, -0.41_62, -1.51_61, -1.20_38, -2.48_23, -2.32_54]] ) torch.testing.assert_close(out.mean(-1 ) , snake_case__ , atol=1e-2 , rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off lowercase__ : Any= torch.tensor([-2.22_27, 4.88_28, 0.90_23, -0.45_78, -0.78_71, -0.10_33, -0.62_21, -0.57_86, -0.78_03, -1.06_74, -1.29_20, -0.15_70, 0.80_08, 2.07_23, -0.94_97, 0.27_71, -2.22_27, -0.76_12, -1.43_46, -1.20_61, -1.64_26, -0.30_00, -0.71_39, -1.19_34, -1.86_91, -1.69_73, -1.59_47, -1.27_05, -0.35_23, -0.55_13] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) , snake_case__ , atol=1e-2 , rtol=1e-2 ) @unittest.skip( "Logits are not exactly the same, once we fix the instabalities somehow, will update! 
Also it is gonna be a `too_slow` test" ) @slow def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Any= [1, 306, 4658, 278, 6593, 310, 2834, 338] lowercase__ : str= LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf" , device_map="auto" ) lowercase__ : Tuple= model(torch.tensor(snake_case__ ) ) lowercase__ : List[Any]= torch.tensor( [[-4.23_27, -3.33_60, -4.66_65, -4.76_31, -1.81_80, -3.41_70, -1.42_11, -3.18_10]] , dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) , snake_case__ , atol=1e-2 , rtol=1e-2 ) # fmt: off lowercase__ : Dict= torch.tensor([-9.49_22, -3.95_51, 1.79_98, -5.67_58, -5.10_55, -5.89_84, -4.83_20, -6.80_86, -6.53_91, -5.61_72, -5.58_20, -5.53_52, 1.78_81, 3.62_89, -6.51_17, -3.47_85, -9.50_00, -6.03_52, -6.81_25, -6.01_95, -6.68_36, -5.47_27, -6.28_12, -6.03_91, -7.33_98, -7.42_97, -7.48_44, -6.58_20, -5.87_89, -5.53_12] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , snake_case__ , atol=1e-5 , rtol=1e-5 ) @unittest.skip("Model is curently gated" ) @slow def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : int= "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi" lowercase__ : Tuple= "Simply put, the theory of relativity states that " lowercase__ : Optional[int]= LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf" ) lowercase__ : List[Any]= tokenizer.encode(snake_case__ , return_tensors="pt" ) lowercase__ : List[Any]= LlamaForCausalLM.from_pretrained( "meta-llama/Llama-2-13b-chat-hf" , device_map="sequential" , use_safetensors=snake_case__ ) # greedy generation outputs lowercase__ : Optional[Any]= model.generate(snake_case__ , max_new_tokens=64 , top_p=snake_case__ , temperature=1 , 
do_sample=snake_case__ ) lowercase__ : List[Any]= tokenizer.decode(generated_ids[0] , skip_special_tokens=snake_case__ ) self.assertEqual(snake_case__ , snake_case__ )
704
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import _LazyModule a : List[str] = {"""processing_wav2vec2_with_lm""": ["""Wav2Vec2ProcessorWithLM"""]} if TYPE_CHECKING: from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM else: import sys a : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
85
0
import math  # kept for compatibility with the original file's imports


def lowercase__(A) -> str:
    """Convert a non-negative decimal integer to an octal string.

    Args:
        A: non-negative integer to convert (e.g. ``65``).

    Returns:
        The octal representation prefixed with ``0o`` (e.g. ``"0o101"``);
        ``0`` yields ``"0o0"``.
    """
    # Fix for the mangled original: the body referenced the undefined names
    # `num`, `remainder`, `octal` and `counter`, and used the loop *argument*
    # as the power-of-ten exponent. Rebuilt with integer arithmetic so large
    # inputs do not suffer float rounding from math.pow.
    num = A
    octal = 0
    counter = 0
    while num > 0:
        # Place the next octal digit (num % 8) at the 10**counter position so
        # the decimal-printed value reads as the octal digits.
        octal += (num % 8) * 10**counter
        counter += 1
        num //= 8  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f'''0o{int(octal)}'''


# Public alias: the demo below (and, in the un-mangled original, external
# callers) refer to the converter by this name.
decimal_to_octal = lowercase__


def lowercase__() -> None:
    """Demo entry point: print octal conversions for a few sample numbers."""
    print("\n2 in octal is:")
    print(decimal_to_octal(2))  # = 2
    print("\n8 in octal is:")
    print(decimal_to_octal(8))  # = 10
    print("\n65 in octal is:")
    print(decimal_to_octal(65))  # = 101
    print("\n216 in octal is:")
    print(decimal_to_octal(216))  # = 330
    print("\n512 in octal is:")
    print(decimal_to_octal(512))  # = 1000
    print("\n")


if __name__ == "__main__":
    # Fix: the original called the undefined name `main()`; the demo function
    # is the final binding of `lowercase__`.
    lowercase__()
705
"""simple docstring""" def lowercase__(A ) ->list: """simple docstring""" if n_term == "": return [] lowercase__ : list= [] for temp in range(int(A ) ): series.append(f'''1/{temp + 1}''' if series else "1" ) return series if __name__ == "__main__": a : Dict = input("""Enter the last number (nth term) of the Harmonic Series""") print("""Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n""") print(harmonic_series(nth_term))
85
0
"""simple docstring""" from pickle import UnpicklingError import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict from ..utils import logging a : Dict = logging.get_logger(__name__) def lowercase__(A , A ) ->Any: """simple docstring""" try: with open(snake_case__ , "rb" ) as flax_state_f: lowercase__ : int= from_bytes(snake_case__ , flax_state_f.read() ) except UnpicklingError as e: try: with open(snake_case__ ) as f: if f.read().startswith("version" ): raise OSError( "You seem to have cloned a repository without having git-lfs installed. Please" " install git-lfs and run `git lfs install` followed by `git lfs pull` in the" " folder you cloned." ) else: raise ValueError from e except (UnicodeDecodeError, ValueError): raise EnvironmentError(f'''Unable to convert {model_file} to Flax deserializable object. ''' ) return load_flax_weights_in_pytorch_model(snake_case__ , snake_case__ ) def lowercase__(A , A ) ->Tuple: """simple docstring""" try: import torch # noqa: F401 except ImportError: logger.error( "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see" " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation" " instructions." ) raise # check if we have bf16 weights lowercase__ : List[Any]= flatten_dict(jax.tree_util.tree_map(lambda A : x.dtype == jnp.bfloataa , snake_case__ ) ).values() if any(snake_case__ ): # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` " "before loading those in PyTorch model." 
) lowercase__ : Tuple= jax.tree_util.tree_map( lambda A : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , snake_case__ ) lowercase__ : Optional[int]= """""" lowercase__ : Dict= flatten_dict(snake_case__ , sep="." ) lowercase__ : int= pt_model.state_dict() # keep track of unexpected & missing keys lowercase__ : Optional[Any]= [] lowercase__ : str= set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): lowercase__ : Tuple= flax_key_tuple.split("." ) if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4: lowercase__ : str= flax_key_tuple_array[:-1] + ["""weight"""] lowercase__ : str= jnp.transpose(snake_case__ , (3, 2, 0, 1) ) elif flax_key_tuple_array[-1] == "kernel": lowercase__ : List[str]= flax_key_tuple_array[:-1] + ["""weight"""] lowercase__ : str= flax_tensor.T elif flax_key_tuple_array[-1] == "scale": lowercase__ : Union[str, Any]= flax_key_tuple_array[:-1] + ["""weight"""] if "time_embedding" not in flax_key_tuple_array: for i, flax_key_tuple_string in enumerate(snake_case__ ): lowercase__ : str= ( flax_key_tuple_string.replace("_0" , ".0" ) .replace("_1" , ".1" ) .replace("_2" , ".2" ) .replace("_3" , ".3" ) .replace("_4" , ".4" ) .replace("_5" , ".5" ) .replace("_6" , ".6" ) .replace("_7" , ".7" ) .replace("_8" , ".8" ) .replace("_9" , ".9" ) ) lowercase__ : Dict= """.""".join(snake_case__ ) if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( f'''Flax checkpoint seems to be incorrect. 
Weight {flax_key_tuple} was expected ''' f'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) else: # add weight to pytorch dict lowercase__ : Optional[Any]= np.asarray(snake_case__ ) if not isinstance(snake_case__ , np.ndarray ) else flax_tensor lowercase__ : Optional[Any]= torch.from_numpy(snake_case__ ) # remove from missing keys missing_keys.remove(snake_case__ ) else: # weight is not expected by PyTorch model unexpected_keys.append(snake_case__ ) pt_model.load_state_dict(snake_case__ ) # re-transform missing_keys to list lowercase__ : Tuple= list(snake_case__ ) if len(snake_case__ ) > 0: logger.warning( "Some weights of the Flax model were not used when initializing the PyTorch model" f''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing''' f''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture''' " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This" f''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect''' " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a" " FlaxBertForSequenceClassification model)." ) if len(snake_case__ ) > 0: logger.warning( f'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly''' f''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to''' " use it for predictions and inference." ) return pt_model
706
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a : int = logging.get_logger(__name__) a : str = { """google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""", """google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""", """google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""", # See all BigBird models at https://huggingface.co/models?filter=big_bird } class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = "big_bird" def __init__( self , snake_case__=50358 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu_new" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=4096 , snake_case__=2 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=True , snake_case__=0 , snake_case__=1 , snake_case__=2 , snake_case__=66 , snake_case__="block_sparse" , snake_case__=True , snake_case__=False , snake_case__=64 , snake_case__=3 , snake_case__=None , **snake_case__ , ): '''simple docstring''' super().__init__( pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , sep_token_id=snake_case__ , **snake_case__ , ) lowercase__ : Dict= vocab_size lowercase__ : Optional[int]= max_position_embeddings lowercase__ : List[Any]= hidden_size lowercase__ : List[str]= num_hidden_layers lowercase__ : List[str]= num_attention_heads lowercase__ : Optional[int]= intermediate_size lowercase__ : Optional[int]= hidden_act lowercase__ : Tuple= hidden_dropout_prob lowercase__ : int= attention_probs_dropout_prob lowercase__ : int= initializer_range lowercase__ : List[Any]= type_vocab_size lowercase__ : Union[str, Any]= layer_norm_eps lowercase__ : Optional[Any]= 
use_cache lowercase__ : Union[str, Any]= rescale_embeddings lowercase__ : Union[str, Any]= attention_type lowercase__ : Any= use_bias lowercase__ : List[Any]= block_size lowercase__ : Optional[Any]= num_random_blocks lowercase__ : Optional[int]= classifier_dropout class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" @property def UpperCAmelCase_ ( self ): '''simple docstring''' if self.task == "multiple-choice": lowercase__ : List[Any]= {0: "batch", 1: "choice", 2: "sequence"} else: lowercase__ : Tuple= {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
85
0
"""simple docstring""" import math from collections.abc import Iterator from itertools import takewhile def lowercase__(A ) ->bool: """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(__snake_case ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def lowercase__() ->Iterator[int]: """simple docstring""" lowercase__ : Any= 2 while True: if is_prime(__snake_case ): yield num num += 1 def lowercase__(A = 2_000_000 ) ->int: """simple docstring""" return sum(takewhile(lambda A : x < n , prime_generator() ) ) if __name__ == "__main__": print(F"""{solution() = }""")
707
"""simple docstring""" from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
85
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available a : Optional[int] = { "configuration_mask2former": [ "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "Mask2FormerConfig", ], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Any = ["Mask2FormerImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Union[str, Any] = [ "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "Mask2FormerForUniversalSegmentation", "Mask2FormerModel", "Mask2FormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_maskaformer import MaskaFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskaformer import ( MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskaFormerForUniversalSegmentation, MaskaFormerModel, MaskaFormerPreTrainedModel, ) else: import sys a : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
708
"""simple docstring""" from unittest.mock import patch import pyspark from datasets.packaged_modules.spark.spark import ( Spark, SparkExamplesIterable, _generate_iterable_examples, ) from ..utils import ( require_dill_gt_0_3_2, require_not_windows, ) def lowercase__(A , A ) ->List[Any]: """simple docstring""" lowercase__ : str= [] for part_id in partition_order: lowercase__ : int= df.where(f'''SPARK_PARTITION_ID() = {part_id}''' ).collect() for row_idx, row in enumerate(A ): expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()) ) return expected_row_ids_and_row_dicts @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->str: """simple docstring""" lowercase__ : Optional[Any]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Tuple= spark.range(100 ).repartition(1 ) lowercase__ : Dict= Spark(A ) # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means # that each partition can hold 2 rows. spark_builder._repartition_df_if_needed(max_shard_size=16 ) # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions. assert spark_builder.df.rdd.getNumPartitions() == 50 @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->Tuple: """simple docstring""" lowercase__ : Union[str, Any]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Dict= spark.range(10 ).repartition(2 ) lowercase__ : Optional[Any]= [1, 0] lowercase__ : List[str]= _generate_iterable_examples(A , A ) # Reverse the partitions. 
lowercase__ : int= _get_expected_row_ids_and_row_dicts_for_partition_order(A , A ) for i, (row_id, row_dict) in enumerate(generate_fn() ): lowercase__, lowercase__ : Any= expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->int: """simple docstring""" lowercase__ : int= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Dict= spark.range(10 ).repartition(1 ) lowercase__ : str= SparkExamplesIterable(A ) assert it.n_shards == 1 for i, (row_id, row_dict) in enumerate(A ): assert row_id == f'''0_{i}''' assert row_dict == {"id": i} @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->str: """simple docstring""" lowercase__ : List[str]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : int= spark.range(30 ).repartition(3 ) # Mock the generator so that shuffle reverses the partition indices. 
with patch("numpy.random.Generator" ) as generator_mock: lowercase__ : Optional[Any]= lambda A : x.reverse() lowercase__ : Tuple= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [2, 1, 0] ) lowercase__ : List[str]= SparkExamplesIterable(A ).shuffle_data_sources(A ) assert shuffled_it.n_shards == 3 for i, (row_id, row_dict) in enumerate(A ): lowercase__, lowercase__ : str= expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->Any: """simple docstring""" lowercase__ : Dict= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Union[str, Any]= spark.range(20 ).repartition(4 ) # Partitions 0 and 2 lowercase__ : Optional[int]= SparkExamplesIterable(A ).shard_data_sources(worker_id=0 , num_workers=2 ) assert shard_it_a.n_shards == 2 lowercase__ : Union[str, Any]= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [0, 2] ) for i, (row_id, row_dict) in enumerate(A ): lowercase__, lowercase__ : Tuple= expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict # Partitions 1 and 3 lowercase__ : Tuple= SparkExamplesIterable(A ).shard_data_sources(worker_id=1 , num_workers=2 ) assert shard_it_a.n_shards == 2 lowercase__ : List[Any]= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [1, 3] ) for i, (row_id, row_dict) in enumerate(A ): lowercase__, lowercase__ : Dict= expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->Tuple: """simple docstring""" lowercase__ : Any= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Tuple= spark.range(100 ).repartition(1 ) lowercase__ : Optional[int]= Spark(A ) # Choose a small max_shard_size for maximum partitioning. 
spark_builder._repartition_df_if_needed(max_shard_size=1 ) # The new number of partitions should not be greater than the number of rows. assert spark_builder.df.rdd.getNumPartitions() == 100
85
0
import random def lowercase__(A , A ) ->tuple: """simple docstring""" lowercase__ : List[Any]= [], [], [] for element in data: if element < pivot: less.append(_lowercase ) elif element > pivot: greater.append(_lowercase ) else: equal.append(_lowercase ) return less, equal, greater def lowercase__(A , A ) ->Dict: """simple docstring""" if index >= len(_lowercase ) or index < 0: return None lowercase__ : Optional[int]= items[random.randint(0 , len(_lowercase ) - 1 )] lowercase__ : Dict= 0 lowercase__ : str= _partition(_lowercase , _lowercase ) lowercase__ : List[Any]= len(_lowercase ) lowercase__ : int= len(_lowercase ) # index is the pivot if m <= index < m + count: return pivot # must be in smaller elif m > index: return quick_select(_lowercase , _lowercase ) # must be in larger else: return quick_select(_lowercase , index - (m + count) )
709
"""simple docstring""" import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=False , snake_case__=False , snake_case__=False , snake_case__=2 , snake_case__=99 , snake_case__=0 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=12 , snake_case__=2 , snake_case__=0.02 , snake_case__=3 , snake_case__=4 , snake_case__="last" , snake_case__=None , snake_case__=None , ): '''simple docstring''' lowercase__ : Optional[int]= parent lowercase__ : Tuple= batch_size lowercase__ : Tuple= seq_length lowercase__ : str= is_training lowercase__ : str= use_input_lengths lowercase__ : Any= use_token_type_ids lowercase__ : List[Any]= use_labels lowercase__ : Optional[int]= gelu_activation lowercase__ : str= sinusoidal_embeddings lowercase__ : List[str]= causal lowercase__ : Any= asm lowercase__ : Optional[int]= n_langs lowercase__ : Union[str, Any]= vocab_size lowercase__ : int= n_special lowercase__ : Any= hidden_size lowercase__ : int= num_hidden_layers lowercase__ : 
List[str]= num_attention_heads lowercase__ : List[str]= hidden_dropout_prob lowercase__ : str= attention_probs_dropout_prob lowercase__ : Any= max_position_embeddings lowercase__ : List[Any]= type_vocab_size lowercase__ : int= type_sequence_label_size lowercase__ : Any= initializer_range lowercase__ : Optional[int]= num_labels lowercase__ : Union[str, Any]= num_choices lowercase__ : List[Any]= summary_type lowercase__ : Optional[int]= use_proj lowercase__ : int= scope def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__ : Dict= random_attention_mask([self.batch_size, self.seq_length] ) lowercase__ : Tuple= None if self.use_input_lengths: lowercase__ : List[Any]= ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length lowercase__ : Tuple= None if self.use_token_type_ids: lowercase__ : Any= ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) lowercase__ : str= None lowercase__ : Tuple= None lowercase__ : Dict= None if self.use_labels: lowercase__ : Optional[Any]= ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ : Optional[Any]= ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase__ : Tuple= ids_tensor([self.batch_size] , 2 ).float() lowercase__ : Tuple= ids_tensor([self.batch_size] , self.num_choices ) lowercase__ : List[Any]= self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def UpperCAmelCase_ ( self ): '''simple docstring''' return FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , 
sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : Any= FlaubertModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : str= model(snake_case__ , lengths=snake_case__ , langs=snake_case__ ) lowercase__ : str= model(snake_case__ , langs=snake_case__ ) lowercase__ : Any= model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : str= FlaubertWithLMHeadModel(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : Optional[Any]= model(snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : str= FlaubertForQuestionAnsweringSimple(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : List[str]= model(snake_case__ ) lowercase__ : Dict= model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) 
) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : List[Any]= FlaubertForQuestionAnswering(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : Dict= model(snake_case__ ) lowercase__ : Any= model( snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , p_mask=snake_case__ , ) lowercase__ : List[str]= model( snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , ) ((lowercase__), ) : Optional[Any]= result_with_labels.to_tuple() lowercase__ : Union[str, Any]= model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ ) ((lowercase__), ) : List[Any]= result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : List[str]= FlaubertForSequenceClassification(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : Optional[Any]= model(snake_case__ ) lowercase__ : Optional[Any]= model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.loss.shape , () ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : List[Any]= self.num_labels lowercase__ : Union[str, Any]= FlaubertForTokenClassification(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : int= model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : int= self.num_choices lowercase__ : str= FlaubertForMultipleChoice(config=snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : Dict= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ : int= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ : str= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ : Any= model( snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[Any]= self.prepare_config_and_inputs() ( ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ) : Any= config_and_inputs lowercase__ : Tuple= { "input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths, "attention_mask": input_mask, } return config, inputs_dict @require_torch class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , 
SCREAMING_SNAKE_CASE__ , unittest.TestCase ): """simple docstring""" __lowerCamelCase = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) __lowerCamelCase = ( { "feature-extraction": FlaubertModel, "fill-mask": FlaubertWithLMHeadModel, "question-answering": FlaubertForQuestionAnsweringSimple, "text-classification": FlaubertForSequenceClassification, "token-classification": FlaubertForTokenClassification, "zero-shot": FlaubertForSequenceClassification, } if is_torch_available() else {} ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__=False ): '''simple docstring''' lowercase__ : Tuple= super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": lowercase__ : List[Any]= torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=snake_case__ ) lowercase__ : List[str]= torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=snake_case__ ) return inputs_dict def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= FlaubertModelTester(self ) lowercase__ : List[str]= ConfigTester(self , config_class=snake_case__ , emb_dim=37 ) def UpperCAmelCase_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Any= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[int]= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Tuple= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[str]= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Union[str, Any]= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*snake_case__ ) def UpperCAmelCase_ ( self ): 
'''simple docstring''' lowercase__ : Tuple= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*snake_case__ ) @slow def UpperCAmelCase_ ( self ): '''simple docstring''' for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : List[str]= FlaubertModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) @slow @require_torch_gpu def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__, lowercase__ : Optional[Any]= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. if model_class == FlaubertForMultipleChoice: return lowercase__ : int= True lowercase__ : List[Any]= model_class(config=snake_case__ ) lowercase__ : str= self._prepare_for_class(snake_case__ , snake_case__ ) lowercase__ : Dict= torch.jit.trace( snake_case__ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(snake_case__ , os.path.join(snake_case__ , "traced_model.pt" ) ) lowercase__ : str= torch.jit.load(os.path.join(snake_case__ , "traced_model.pt" ) , map_location=snake_case__ ) loaded(inputs_dict["input_ids"].to(snake_case__ ) , inputs_dict["attention_mask"].to(snake_case__ ) ) @require_torch class __UpperCAmelCase( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= FlaubertModel.from_pretrained("flaubert/flaubert_base_cased" ) lowercase__ : Tuple= torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) with torch.no_grad(): lowercase__ : Optional[int]= model(snake_case__ )[0] lowercase__ : Optional[int]= torch.Size((1, 11, 768) 
) self.assertEqual(output.shape , snake_case__ ) lowercase__ : Dict= torch.tensor( [[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1e-4 ) )
85
0
"""simple docstring""" import unittest import numpy as np from transformers import RobertaConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.roberta.modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, ) class __UpperCAmelCase( unittest.TestCase ): """simple docstring""" def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=99 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=16 , snake_case__=2 , snake_case__=0.02 , snake_case__=4 , ): '''simple docstring''' lowercase__ : str= parent lowercase__ : List[str]= batch_size lowercase__ : Tuple= seq_length lowercase__ : Any= is_training lowercase__ : Dict= use_attention_mask lowercase__ : List[Any]= use_token_type_ids lowercase__ : Union[str, Any]= use_labels lowercase__ : Union[str, Any]= vocab_size lowercase__ : Dict= hidden_size lowercase__ : List[Any]= num_hidden_layers lowercase__ : Optional[int]= num_attention_heads lowercase__ : Optional[int]= intermediate_size lowercase__ : int= hidden_act lowercase__ : Optional[Any]= hidden_dropout_prob lowercase__ : Optional[Any]= attention_probs_dropout_prob lowercase__ : Dict= max_position_embeddings lowercase__ : Optional[int]= type_vocab_size lowercase__ : int= type_sequence_label_size lowercase__ : int= initializer_range lowercase__ : str= num_choices def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Union[str, Any]= ids_tensor([self.batch_size, self.seq_length] , 
self.vocab_size ) lowercase__ : Optional[Any]= None if self.use_attention_mask: lowercase__ : Optional[Any]= random_attention_mask([self.batch_size, self.seq_length] ) lowercase__ : List[str]= None if self.use_token_type_ids: lowercase__ : Union[str, Any]= ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase__ : Dict= RobertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Tuple= self.prepare_config_and_inputs() lowercase__, lowercase__, lowercase__, lowercase__ : List[str]= config_and_inputs lowercase__ : Union[str, Any]= {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[Any]= self.prepare_config_and_inputs() lowercase__, lowercase__, lowercase__, lowercase__ : Any= config_and_inputs lowercase__ : Union[str, Any]= True lowercase__ : int= floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowercase__ : str= ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax class __UpperCAmelCase( UpperCAmelCase__ , unittest.TestCase ): """simple docstring""" __lowerCamelCase = True __lowerCamelCase = ( ( FlaxRobertaModel, FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, 
FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, ) if is_flax_available() else () ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : int= FlaxRobertaModelTester(self ) @slow def UpperCAmelCase_ ( self ): '''simple docstring''' for model_class_name in self.all_model_classes: lowercase__ : Optional[int]= model_class_name.from_pretrained("roberta-base" , from_pt=__lowerCAmelCase ) lowercase__ : Any= model(np.ones((1, 1) ) ) self.assertIsNotNone(__lowerCAmelCase )
710
"""simple docstring""" from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = 42 __lowerCamelCase = 42 __lowerCamelCase = None class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = 2 @register_to_config def __init__( self , snake_case__ = 0.02 , snake_case__ = 100 , snake_case__ = 1.0_07 , snake_case__ = 80 , snake_case__ = 0.05 , snake_case__ = 50 , ): '''simple docstring''' # standard deviation of the initial noise distribution lowercase__ : int= sigma_max # setable values lowercase__ : int= None lowercase__ : np.IntTensor= None lowercase__ : torch.FloatTensor= None # sigma(t_i) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' return sample def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' lowercase__ : List[Any]= num_inference_steps lowercase__ : Any= np.arange(0 , self.num_inference_steps )[::-1].copy() lowercase__ : Tuple= torch.from_numpy(snake_case__ ).to(snake_case__ ) lowercase__ : Union[str, Any]= [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in self.timesteps ] lowercase__ : int= torch.tensor(snake_case__ , dtype=torch.floataa , device=snake_case__ ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ = None ): '''simple docstring''' if self.config.s_min <= sigma <= self.config.s_max: lowercase__ : Optional[Any]= min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 ) else: lowercase__ : str= 0 # sample eps ~ N(0, S_noise^2 * I) lowercase__ : List[Any]= self.config.s_noise * 
randn_tensor(sample.shape , generator=snake_case__ ).to(sample.device ) lowercase__ : str= sigma + gamma * sigma lowercase__ : Any= sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = True , ): '''simple docstring''' lowercase__ : Union[str, Any]= sample_hat + sigma_hat * model_output lowercase__ : Optional[int]= (sample_hat - pred_original_sample) / sigma_hat lowercase__ : Optional[Any]= sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=snake_case__ , derivative=snake_case__ , pred_original_sample=snake_case__ ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = True , ): '''simple docstring''' lowercase__ : int= sample_prev + sigma_prev * model_output lowercase__ : Optional[int]= (sample_prev - pred_original_sample) / sigma_prev lowercase__ : Optional[Any]= sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=snake_case__ , derivative=snake_case__ , pred_original_sample=snake_case__ ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' raise NotImplementedError()
85
0
"""simple docstring""" import re import tempfile from pathlib import Path import pytest import yaml from datasets.utils.readme import ReadMe # @pytest.fixture # def example_yaml_structure(): a : Any = yaml.safe_load( """\ name: \"\" allow_empty: false allow_empty_text: true subsections: - name: \"Dataset Card for X\" # First-level markdown heading allow_empty: false allow_empty_text: true subsections: - name: \"Table of Contents\" allow_empty: false allow_empty_text: false subsections: null - name: \"Dataset Description\" allow_empty: false allow_empty_text: false subsections: - name: \"Dataset Summary\" allow_empty: false allow_empty_text: false subsections: null - name: \"Supported Tasks and Leaderboards\" allow_empty: true allow_empty_text: true subsections: null - name: Languages allow_empty: false allow_empty_text: true subsections: null """ ) a : Optional[Any] = { """name""": """root""", """text""": """""", """is_empty_text""": True, """subsections""": [ { """name""": """Dataset Card for My Dataset""", """text""": """""", """is_empty_text""": True, """subsections""": [ {"""name""": """Table of Contents""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": []}, { """name""": """Dataset Description""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": [ { """name""": """Dataset Summary""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": [], }, { """name""": """Supported Tasks and Leaderboards""", """text""": """""", """is_empty_text""": True, """subsections""": [], }, {"""name""": """Languages""", """text""": """Language Text""", """is_empty_text""": False, """subsections""": []}, ], }, ], } ], } a : int = """\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. 
### Supported Tasks and Leaderboards ### Languages Language Text """ a : Union[str, Any] = """\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. #### Extra Ignored Subsection ### Supported Tasks and Leaderboards ### Languages Language Text """ a : Tuple = { """name""": """root""", """text""": """""", """is_empty_text""": True, """subsections""": [ { """name""": """Dataset Card for My Dataset""", """text""": """""", """is_empty_text""": True, """subsections""": [ {"""name""": """Table of Contents""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": []}, { """name""": """Dataset Description""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": [ { """name""": """Dataset Summary""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": [ { """name""": """Extra Ignored Subsection""", """text""": """""", """is_empty_text""": True, """subsections""": [], } ], }, { """name""": """Supported Tasks and Leaderboards""", """text""": """""", """is_empty_text""": True, """subsections""": [], }, {"""name""": """Languages""", """text""": """Language Text""", """is_empty_text""": False, """subsections""": []}, ], }, ], } ], } a : Any = """\ --- --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text """ a : str = ( """The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.""" ) a : Tuple = """\ # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. 
### Supported Tasks and Leaderboards ### Languages Language Text """ a : Optional[Any] = ( """The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.""" ) a : Optional[int] = """\ --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text """ a : Dict = """The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.""" a : str = """\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary ### Supported Tasks and Leaderboards ### Languages Language Text """ a : Any = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).""" a : Optional[Any] = """\ --- language: - zh - en --- # Dataset Card for My Dataset """ a : Union[str, Any] = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'.""" a : Dict = """\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Languages Language Text """ a : Dict = """The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.""" a : Tuple = """\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. 
## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages """ a : str = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.""" a : Optional[Any] = """\ --- language: - zh - en --- ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text """ a : Any = """The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.""" a : List[Any] = """\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text # Dataset Card My Dataset """ a : Tuple = """The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.""" a : List[str] = """\ --- language: - zh - en --- # Dataset Card My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text """ a : Tuple = """The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.""" a : Union[str, Any] = """""" a : Any = """The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. 
Skipping further validation for this README.\n-\tNo YAML markers are present in the README.""" a : str = """\ --- language: - zh - en --- # Dataset Card for My Dataset # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text """ a : int = """The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.""" @pytest.mark.parametrize( "readme_md, expected_dict" , [ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] , ) def lowercase__(A , A ) ->List[Any]: """simple docstring""" assert ReadMe.from_string(_snake_case , _snake_case ).to_dict() == expected_dict @pytest.mark.parametrize( "readme_md, expected_error" , [ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] , ) def lowercase__(A , A ) ->List[Any]: """simple docstring""" with pytest.raises(_snake_case , match=re.escape(expected_error.format(path="root" ) ) ): lowercase__ : Optional[Any]= ReadMe.from_string(_snake_case , _snake_case ) readme.validate() @pytest.mark.parametrize( "readme_md, expected_error" , [ (README_MULTIPLE_SAME_HEADING_1, 
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] , ) def lowercase__(A , A ) ->List[Any]: """simple docstring""" with pytest.raises(_snake_case , match=re.escape(expected_error.format(path="root" ) ) ): ReadMe.from_string(_snake_case , _snake_case ) @pytest.mark.parametrize( "readme_md," , [ (README_MULTIPLE_SAME_HEADING_1), ] , ) def lowercase__(A ) ->Optional[Any]: """simple docstring""" ReadMe.from_string(_snake_case , _snake_case , suppress_parsing_errors=_snake_case ) @pytest.mark.parametrize( "readme_md, expected_dict" , [ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] , ) def lowercase__(A , A ) ->Optional[Any]: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: lowercase__ : Optional[Any]= Path(_snake_case ) / "README.md" with open(_snake_case , "w+" ) as readme_file: readme_file.write(_snake_case ) lowercase__ : Any= ReadMe.from_readme(_snake_case , _snake_case ).to_dict() assert out["name"] == path assert out["text"] == "" assert out["is_empty_text"] assert out["subsections"] == expected_dict["subsections"] @pytest.mark.parametrize( "readme_md, expected_error" , [ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] , ) def lowercase__(A , A ) ->Optional[Any]: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: lowercase__ : List[str]= 
Path(_snake_case ) / "README.md" with open(_snake_case , "w+" ) as readme_file: readme_file.write(_snake_case ) lowercase__ : List[str]= expected_error.format(path=_snake_case ) with pytest.raises(_snake_case , match=re.escape(_snake_case ) ): lowercase__ : int= ReadMe.from_readme(_snake_case , _snake_case ) readme.validate() @pytest.mark.parametrize( "readme_md, expected_error" , [ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] , ) def lowercase__(A , A ) ->Any: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: lowercase__ : int= Path(_snake_case ) / "README.md" with open(_snake_case , "w+" ) as readme_file: readme_file.write(_snake_case ) lowercase__ : str= expected_error.format(path=_snake_case ) with pytest.raises(_snake_case , match=re.escape(_snake_case ) ): ReadMe.from_readme(_snake_case , _snake_case ) @pytest.mark.parametrize( "readme_md," , [ (README_MULTIPLE_SAME_HEADING_1), ] , ) def lowercase__(A ) ->Dict: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: lowercase__ : Optional[Any]= Path(_snake_case ) / "README.md" with open(_snake_case , "w+" ) as readme_file: readme_file.write(_snake_case ) ReadMe.from_readme(_snake_case , _snake_case , suppress_parsing_errors=_snake_case )
711
"""Config wrapper that adapts a text-model config for a multimodal (MMBT-style) model."""
from ....utils import logging


a : List[str] = logging.get_logger(__name__)


class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
    """Wraps an existing text-model config for multimodal use.

    Adopts every attribute of the wrapped ``config``, records the hidden
    size of the non-text modality, and optionally overrides the number of
    classification labels.
    """

    def __init__( self , config , num_labels=None , modal_hidden_size=2048 ):
        """
        Args:
            config: configuration object whose attributes are adopted wholesale.
            num_labels: optional override for the label count; applied only when
                truthy, so ``None``/``0`` keep the value inherited from ``config``.
            modal_hidden_size: embedding dimension of the non-text modality.
        """
        # Fix: the original signature declared three parameters all named
        # ``snake_case__`` (a SyntaxError) while the body referenced
        # ``config``/``modal_hidden_size``/``num_labels`` — restore the names
        # the body actually uses, and store onto ``self`` instead of discarding
        # the values in throwaway locals.
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
85
0
"""simple docstring""" import argparse import os import jax as jnp import numpy as onp import torch import torch.nn as nn from music_spectrogram_diffusion import inference from tax import checkpoints from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder a : List[str] = 'base_with_context' def lowercase__(A , A ) ->Any: """simple docstring""" lowercase__ : Dict= nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"] ) ) lowercase__ : Union[str, Any]= nn.Parameter( torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=_lowercase ) for lyr_num, lyr in enumerate(model.encoders ): lowercase__ : Optional[int]= weights[f'''layers_{lyr_num}'''] lowercase__ : List[Any]= nn.Parameter( torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) ) lowercase__ : Dict= ly_weight['''attention'''] lowercase__ : Union[str, Any]= nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) ) lowercase__ : Dict= nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) ) lowercase__ : Dict= nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) ) lowercase__ : int= nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) ) lowercase__ : List[Any]= nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) ) lowercase__ : List[str]= nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) ) lowercase__ : str= nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) ) lowercase__ : int= nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) ) lowercase__ : Optional[Any]= nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) ) return model def lowercase__(A , A ) ->Tuple: """simple docstring""" lowercase__ : List[str]= nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T ) ) 
lowercase__ : Dict= nn.Parameter( torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=_lowercase ) for lyr_num, lyr in enumerate(model.encoders ): lowercase__ : Optional[Any]= weights[f'''layers_{lyr_num}'''] lowercase__ : Union[str, Any]= ly_weight['''attention'''] lowercase__ : Any= nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) ) lowercase__ : Union[str, Any]= nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) ) lowercase__ : str= nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) ) lowercase__ : int= nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) ) lowercase__ : Optional[int]= nn.Parameter( torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) ) lowercase__ : Optional[Any]= nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) ) lowercase__ : Any= nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) ) lowercase__ : List[Any]= nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) ) lowercase__ : Union[str, Any]= nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) ) lowercase__ : Optional[int]= nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) ) return model def lowercase__(A , A ) ->List[Any]: """simple docstring""" lowercase__ : List[Any]= nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T ) ) lowercase__ : Optional[int]= nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T ) ) lowercase__ : Dict= nn.Parameter( torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=_lowercase ) lowercase__ : List[str]= nn.Parameter( torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T ) ) for lyr_num, lyr in enumerate(model.decoders ): lowercase__ : str= weights[f'''layers_{lyr_num}'''] lowercase__ : List[str]= nn.Parameter( torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"] ) ) lowercase__ : 
List[str]= nn.Parameter( torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T ) ) lowercase__ : str= ly_weight['''self_attention'''] lowercase__ : str= nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) ) lowercase__ : int= nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) ) lowercase__ : int= nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) ) lowercase__ : Optional[int]= nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) ) lowercase__ : List[str]= ly_weight['''MultiHeadDotProductAttention_0'''] lowercase__ : Dict= nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) ) lowercase__ : Dict= nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) ) lowercase__ : int= nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) ) lowercase__ : Any= nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) ) lowercase__ : Tuple= nn.Parameter( torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"] ) ) lowercase__ : List[Any]= nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) ) lowercase__ : Tuple= nn.Parameter( torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T ) ) lowercase__ : Tuple= nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) ) lowercase__ : Optional[int]= nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) ) lowercase__ : Any= nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) ) lowercase__ : Union[str, Any]= nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"] ) ) lowercase__ : List[Any]= nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T ) ) return model def lowercase__(A ) ->Tuple: """simple docstring""" lowercase__ : Any= checkpoints.load_tax_checkpoint(args.checkpoint_path ) lowercase__ : Tuple= jnp.tree_util.tree_map(onp.array , _lowercase ) lowercase__ : 
Optional[int]= [ '''from __gin__ import dynamic_registration''', '''from music_spectrogram_diffusion.models.diffusion import diffusion_utils''', '''diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0''', '''diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()''', ] lowercase__ : int= os.path.join(args.checkpoint_path , ".." , "config.gin" ) lowercase__ : Union[str, Any]= inference.parse_training_gin_file(_lowercase , _lowercase ) lowercase__ : Optional[int]= inference.InferenceModel(args.checkpoint_path , _lowercase ) lowercase__ : Optional[Any]= DDPMScheduler(beta_schedule="squaredcos_cap_v2" , variance_type="fixed_large" ) lowercase__ : Optional[int]= SpectrogramNotesEncoder( max_length=synth_model.sequence_length["inputs"] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , ) lowercase__ : Optional[int]= SpectrogramContEncoder( input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["targets_context"] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , ) lowercase__ : Dict= TaFilmDecoder( input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length["targets_context"] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , 
d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , ) lowercase__ : Any= load_notes_encoder(ta_checkpoint["target"]["token_encoder"] , _lowercase ) lowercase__ : Union[str, Any]= load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"] , _lowercase ) lowercase__ : Optional[Any]= load_decoder(ta_checkpoint["target"]["decoder"] , _lowercase ) lowercase__ : Union[str, Any]= OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder" ) lowercase__ : List[Any]= SpectrogramDiffusionPipeline( notes_encoder=_lowercase , continuous_encoder=_lowercase , decoder=_lowercase , scheduler=_lowercase , melgan=_lowercase , ) if args.save: pipe.save_pretrained(args.output_path ) if __name__ == "__main__": a : Dict = argparse.ArgumentParser() parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""") parser.add_argument( """--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not.""" ) parser.add_argument( """--checkpoint_path""", default=F"""{MODEL}/checkpoint_500000""", type=str, required=False, help="""Path to the original jax model checkpoint.""", ) a : List[str] = parser.parse_args() main(args)
712
"""simple docstring""" import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def lowercase__(A ) ->int: """simple docstring""" lowercase__ : Optional[int]= [] embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''', f'''stage{idx}.patch_embed.proj.weight''', ) ) embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''', f'''stage{idx}.patch_embed.proj.bias''', ) ) embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''', f'''stage{idx}.patch_embed.norm.weight''', ) ) embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''', f'''stage{idx}.patch_embed.norm.bias''', ) ) return embed def lowercase__(A , A ) ->Any: """simple docstring""" lowercase__ : Any= [] attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''', ) ) attention_weights.append( ( 
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''', ) ) attention_weights.append( ( 
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''', ) ) attention_weights.append( ( 
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj.bias''', ) ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', f'''stage{idx}.blocks.{cnt}.norm1.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', f'''stage{idx}.blocks.{cnt}.norm1.bias''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', f'''stage{idx}.blocks.{cnt}.norm2.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', f'''stage{idx}.blocks.{cnt}.norm2.bias''') ) return attention_weights def lowercase__(A ) ->List[Any]: """simple 
docstring""" lowercase__ : Dict= [] token.append((f'''cvt.encoder.stages.{idx}.cls_token''', "stage2.cls_token") ) return token def lowercase__() ->Union[str, Any]: """simple docstring""" lowercase__ : Dict= [] head.append(("layernorm.weight", "norm.weight") ) head.append(("layernorm.bias", "norm.bias") ) head.append(("classifier.weight", "head.weight") ) head.append(("classifier.bias", "head.bias") ) return head def lowercase__(A , A , A , A ) ->Optional[int]: """simple docstring""" lowercase__ : List[str]= "imagenet-1k-id2label.json" lowercase__ : List[str]= 1_000 lowercase__ : Tuple= "huggingface/label-files" lowercase__ : int= num_labels lowercase__ : int= json.load(open(cached_download(hf_hub_url(A , A , repo_type="dataset" ) ) , "r" ) ) lowercase__ : str= {int(A ): v for k, v in idalabel.items()} lowercase__ : Optional[int]= idalabel lowercase__ : Union[str, Any]= {v: k for k, v in idalabel.items()} lowercase__ : Tuple= CvtConfig(num_labels=A , idalabel=A , labelaid=A ) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit("/" , 1 )[-1][4:6] == "13": lowercase__ : int= [1, 2, 10] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit("/" , 1 )[-1][4:6] == "21": lowercase__ : Union[str, Any]= [1, 4, 16] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: lowercase__ : Optional[Any]= [2, 2, 20] lowercase__ : Optional[Any]= [3, 12, 16] lowercase__ : List[str]= [192, 768, 1_024] lowercase__ : List[str]= CvtForImageClassification(A ) lowercase__ : Any= AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" ) lowercase__ : Dict= image_size lowercase__ : int= torch.load(A , map_location=torch.device("cpu" ) ) lowercase__ : Optional[Any]= OrderedDict() lowercase__ : Tuple= [] for idx in range(len(config.depth ) ): if config.cls_token[idx]: lowercase__ : Optional[int]= list_of_state_dict + cls_token(A ) lowercase__ : List[str]= list_of_state_dict + embeddings(A ) for cnt in range(config.depth[idx] ): lowercase__ : Dict= 
list_of_state_dict + attention(A , A ) lowercase__ : Optional[Any]= list_of_state_dict + final() for gg in list_of_state_dict: print(A ) for i in range(len(A ) ): lowercase__ : str= original_weights[list_of_state_dict[i][1]] model.load_state_dict(A ) model.save_pretrained(A ) image_processor.save_pretrained(A ) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": a : Optional[int] = argparse.ArgumentParser() parser.add_argument( """--cvt_model""", default="""cvt-w24""", type=str, help="""Name of the cvt model you'd like to convert.""", ) parser.add_argument( """--image_size""", default=384, type=int, help="""Input Image Size""", ) parser.add_argument( """--cvt_file_name""", default=r"""cvtmodels\CvT-w24-384x384-IN-22k.pth""", type=str, help="""Input Image Size""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) a : Optional[int] = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
85
0
"""Translation feature types for the `datasets` feature system."""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import pyarrow as pa


if TYPE_CHECKING:
    from .features import FeatureType


@dataclass
class Translation:
    """Feature for translations with a fixed set of languages per example.

    Each example stores exactly one string per language in ``languages``.

    Attributes:
        languages: language codes covered by every example.
        id: optional feature identifier.
    """

    # Fix: the original declared every field as ``__lowerCamelCase`` with no
    # annotations, so the dataclass had no fields at all and later assignments
    # overwrote earlier ones; field names are restored from the attribute
    # accesses visible in the methods below.
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        # Arrow storage: one string child per language, sorted for a
        # deterministic struct layout.
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a plain dict of string Values."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    """Feature for translations where each example may cover a different
    subset of languages, possibly with several translations per language.

    Attributes:
        languages: optional closed set of allowed language codes; ``None``
            disables validation in :meth:`encode_example`.
        num_languages: derived in ``__post_init__``; callers need not set it.
        id: optional feature identifier.
    """

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        # Normalize: deduplicate and sort so the language set has a canonical
        # order. (Originally named ``UpperCAmelCase_`` and therefore never run
        # automatically; restored to the dataclass post-init hook.)
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        """Validate and normalize one example.

        Args:
            translation_dict: mapping of language code -> translation string,
                or language code -> list of translation strings.

        Returns:
            dict with parallel, language-sorted ``language``/``translation`` tuples.

        Raises:
            ValueError: if a language is outside the declared ``languages`` set.
        """
        # Fix: the original bound its results to ``lowercase__`` placeholders
        # and then read ``lang_set``/``translation_tuples``/... — every name
        # below is restored from those reads so the method actually runs.
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) "
                f"are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))
        return {"language": languages, "translation": translations}

    def flatten(self):
        """Flatten the feature into sequences of language codes and translations."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
713
"""Unconditional image-generation pipeline for the score-based SDE-VE model."""
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class ScoreSdeVePipeline(DiffusionPipeline):
    """Predictor-corrector sampling loop pairing a U-Net with a ScoreSdeVe scheduler.

    Attributes:
        unet: noise-prediction model.
        scheduler: SDE-VE scheduler providing sigmas/timesteps and the PC updates.
    """

    unet: UNetaDModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator=None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        """Generate `batch_size` images.

        Returns:
            `ImagePipelineOutput` (or a 1-tuple when `return_dict=False`) holding the
            generated images, as PIL images when `output_type == "pil"` else a numpy array.
        """
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet

        # Start from scaled Gaussian noise.
        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step(s): Langevin-style refinement at the current noise level
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step: move to the next noise level
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)
            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        # Use the noise-free mean of the final step for the returned image.
        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample)
85
0
import baseaa def lowercase__(A ) ->int: """simple docstring""" return baseaa.baaencode(string.encode("utf-8" ) ) def lowercase__(A ) ->Union[str, Any]: """simple docstring""" return baseaa.baadecode(A ).decode("utf-8" ) if __name__ == "__main__": a : Union[str, Any] = """Hello World!""" a : Dict = baseaa_encode(test) print(encoded) a : str = baseaa_decode(encoded) print(decoded)
714
"""simple docstring""" def lowercase__(A ) ->list[int]: """simple docstring""" lowercase__ : List[str]= len(A ) for i in range(A ): for j in range(i + 1 , A ): if numbers[j] < numbers[i]: lowercase__, lowercase__ : List[str]= numbers[j], numbers[i] return numbers if __name__ == "__main__": a : Dict = input("""Enter numbers separated by a comma:\n""").strip() a : List[str] = [int(item) for item in user_input.split(""",""")] print(exchange_sort(unsorted))
85
0
"""simple docstring""" import importlib import shutil import threading import warnings from typing import List import fsspec import fsspec.asyn from . import compression from .hffilesystem import HfFileSystem a : Optional[Any] = importlib.util.find_spec("""s3fs""") is not None if _has_safs: from .safilesystem import SaFileSystem # noqa: F401 a : Optional[int] = [ compression.BzaFileSystem, compression.GzipFileSystem, compression.LzaFileSystem, compression.XzFileSystem, compression.ZstdFileSystem, ] # Register custom filesystems for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]: if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class: warnings.warn(F"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""") fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True) def lowercase__(A ) ->Dict: if "://" in dataset_path: lowercase__ : List[Any]= dataset_path.split("://" )[1] return dataset_path def lowercase__(A ) ->Optional[int]: if fs is not None and fs.protocol != "file": return True else: return False def lowercase__(A , A , A ) ->Optional[int]: lowercase__ : List[Any]= not is_remote_filesystem(SCREAMING_SNAKE_CASE_ ) if is_local: # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory shutil.move(fs._strip_protocol(SCREAMING_SNAKE_CASE_ ) , fs._strip_protocol(SCREAMING_SNAKE_CASE_ ) ) else: fs.mv(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , recursive=SCREAMING_SNAKE_CASE_ ) def lowercase__() ->Optional[Any]: if hasattr(fsspec.asyn , "reset_lock" ): # for future fsspec>2022.05.0 fsspec.asyn.reset_lock() else: lowercase__ : List[str]= None lowercase__ : Tuple= None lowercase__ : List[Any]= threading.Lock()
715
"""simple docstring""" import math from collections.abc import Iterator from itertools import takewhile def lowercase__(A ) ->bool: """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(A ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def lowercase__() ->Iterator[int]: """simple docstring""" lowercase__ : Union[str, Any]= 2 while True: if is_prime(A ): yield num num += 1 def lowercase__(A = 2_000_000 ) ->int: """simple docstring""" return sum(takewhile(lambda A : x < n , prime_generator() ) ) if __name__ == "__main__": print(F"""{solution() = }""")
85
0
"""Interactive questionnaire building a `SageMakerConfig` for `accelerate config`."""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os

from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
    DYNAMO_BACKENDS,
    _ask_field,
    _ask_options,
    _convert_dynamo_backend,
    _convert_mixed_precision,
    _convert_sagemaker_distributed_mode,
    _convert_yes_no_to_bool,
)

if is_botoa_available():
    import botoa  # noqa: F401


def _create_iam_role_for_sagemaker(role_name):
    """Create an IAM role trusted by SageMaker with the permissions training jobs need.

    Idempotent: if the role already exists, the existing role is kept.
    """
    iam_client = botoa.client("iam")
    sagemaker_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2)
        )
        policy_document = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": [
                        "sagemaker:*",
                        "ecr:GetDownloadUrlForLayer",
                        "ecr:BatchGetImage",
                        "ecr:BatchCheckLayerAvailability",
                        "ecr:GetAuthorizationToken",
                        "cloudwatch:PutMetricData",
                        "cloudwatch:GetMetricData",
                        "cloudwatch:GetMetricStatistics",
                        "cloudwatch:ListMetrics",
                        "logs:CreateLogGroup",
                        "logs:CreateLogStream",
                        "logs:DescribeLogStreams",
                        "logs:PutLogEvents",
                        "logs:GetLogEvents",
                        "s3:CreateBucket",
                        "s3:ListBucket",
                        "s3:GetBucketLocation",
                        "s3:GetObject",
                        "s3:PutObject",
                    ],
                    "Resource": "*",
                }
            ],
        }
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name,
            PolicyName=f"{role_name}_policy_permission",
            PolicyDocument=json.dumps(policy_document, indent=2),
        )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(f"role {role_name} already exists. Using existing one")


def _get_iam_role_arn(role_name):
    """Return the ARN of an existing IAM role."""
    iam_client = botoa.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]


def get_sagemaker_input():
    """Prompt for all SageMaker-specific settings and return a populated SageMakerConfig.

    Side effects: may export AWS_* environment variables and create an IAM role.
    """
    credentials_configuration = _ask_options(
        "How do you want to authorize?",
        ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "],
        int,
    )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`"
        )
        aws_access_key_id = _ask_field("AWS Access Key ID: ")
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id
        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key

    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
    os.environ["AWS_DEFAULT_REGION"] = aws_region

    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?",
        ["Provide IAM Role name", "Create new IAM role using credentials"],
        int,
    )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: ")
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials')
        _create_iam_role_for_sagemaker(iam_role_name)

    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())

    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ",
            lambda x: str(x).lower(),
        )

    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ",
            lambda x: str(x).lower(),
        )

    distributed_type = _ask_options(
        "What is the distributed mode?",
        ["No distributed training", "Data parallelism"],
        _convert_sagemaker_distributed_mode,
    )

    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?",
            [x.lower() for x in DYNAMO_BACKENDS],
            _convert_dynamo_backend,
            default=2,
        )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )
        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?",
                TORCH_DYNAMO_MODES,
                lambda x: TORCH_DYNAMO_MODES[int(x)],
                default="default",
            )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )

    eca_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        eca_instance_type = _ask_options(
            eca_instance_query,
            SAGEMAKER_PARALLEL_EC2_INSTANCES,
            lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)],
        )
    else:
        eca_instance_query += "? [ml.p3.2xlarge]:"
        eca_instance_type = _ask_field(eca_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")

    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            "How many machines do you want use? [1]: ",
            int,
            default=1,
        )

    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?",
        ["no", "fp16", "bf16", "fp8"],
        _convert_mixed_precision,
    )
    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
        )

    return SageMakerConfig(
        image_uri=docker_image,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=distributed_type,
        use_cpu=False,
        dynamo_config=dynamo_config,
        eca_instance_type=eca_instance_type,
        profile=aws_profile,
        region=aws_region,
        iam_role_name=iam_role_name,
        mixed_precision=mixed_precision,
        num_machines=num_machines,
        sagemaker_inputs_file=sagemaker_inputs_file,
        sagemaker_metrics_file=sagemaker_metrics_file,
    )
716
"""simple docstring""" def lowercase__(A ) ->bool: """simple docstring""" lowercase__ : Tuple= (1 + 24 * n) ** 0.5 return ((1 + root) / 6) % 1 == 0 def lowercase__(A = 5_000 ) ->int: """simple docstring""" lowercase__ : str= [(i * (3 * i - 1)) // 2 for i in range(1 , A )] for i, pentagonal_i in enumerate(A ): for j in range(A , len(A ) ): lowercase__ : List[Any]= pentagonal_nums[j] lowercase__ : int= pentagonal_i + pentagonal_j lowercase__ : Optional[int]= pentagonal_j - pentagonal_i if is_pentagonal(A ) and is_pentagonal(A ): return b return -1 if __name__ == "__main__": print(F"""{solution() = }""")
85
0
"""Tests for the MobileViT image processor."""
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor


class MobileViTImageProcessingTester(unittest.TestCase):
    """Holds sizing parameters and builds the kwargs dict for the image processor."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }


@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        """The processor exposes all configured attributes."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        """`from_dict` kwargs override serialized size/crop_size."""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
717
"""Pix2Struct model configurations: text decoder, vision encoder, and combined."""
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}


class PixaStructTextConfig(PretrainedConfig):
    """Configuration for the Pix2Struct text decoder."""

    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class PixaStructVisionConfig(PretrainedConfig):
    """Configuration for the Pix2Struct vision encoder."""

    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class PixaStructConfig(PretrainedConfig):
    """Combined configuration holding a text and a vision sub-config."""

    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = PixaStructTextConfig(**text_config)
        self.vision_config = PixaStructVisionConfig(**vision_config)

        # Mirror special token ids from the text decoder.
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        # Keep both sub-configs initialized with the same range.
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Build a combined config from already-instantiated sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, expanding both sub-configs to plain dicts."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
85
0
"""simple docstring""" from ...utils import deprecate from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401 from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401 deprecate( """stable diffusion controlnet""", """0.22.0""", """Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""", standard_warn=False, stacklevel=3, )
718
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM @require_tf @require_sentencepiece @require_tokenizers class __UpperCAmelCase( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : str= TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" ) lowercase__ : str= AutoTokenizer.from_pretrained("google/mt5-small" ) lowercase__ : Tuple= tokenizer("Hello there" , return_tensors="tf" ).input_ids lowercase__ : Optional[Any]= tokenizer("Hi I am" , return_tensors="tf" ).input_ids lowercase__ : Optional[Any]= model(snake_case__ , labels=snake_case__ ).loss lowercase__ : int= -tf.math.reduce_mean(snake_case__ ).numpy() lowercase__ : int= -21.22_81_68 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
85
0
import os
import string
import sys

# Bit flag OR-ed onto arrow-key codes so they cannot collide with plain
# single-byte character values.
ARROW_KEY_FLAG = 1 << 8

# Logical key name -> key code used by the menu input loop.
KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

# Arrow keys form a contiguous code range; get_character() uses these bounds
# to recognise them.  Bug fix: these values were previously bound to a
# throwaway module variable, so the KEYMAP["arrow_begin"] / ["arrow_end"]
# lookups below raised KeyError.
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    # Characters already translated from Windows scan codes, pending delivery.
    # Bug fix: both names below were previously assigned to a throwaway
    # variable, making the Windows branch of get_raw_chars() a NameError.
    WIN_CH_BUFFER = []
    # Two-byte Windows scan-code prefixes -> un-flagged arrow codes.
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

# Digits map to their own ASCII codes.
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))


def get_raw_chars():
    """Read one raw keypress and return it as a one-character string.

    On Windows this translates two-byte scan codes via WIN_KEYMAP; on POSIX it
    puts the terminal into raw mode, reads one byte, and restores the terminal
    settings even if the read fails.
    """
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    # Unknown scan code: fall back to its second byte.
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            # Always restore the terminal, even if the read raises.
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    """Read one keypress and normalise it.

    Returns the character itself for printable / interrupt / newline keys, an
    arrow-key code (with ARROW_KEY_FLAG set) wrapped in chr() for arrow keys,
    and KEYMAP["undefined"] for everything else.

    Bug fixes: this function previously returned an undefined name (`char`)
    and called an undefined name (`get_raw_chars`) because the reader function
    had been shadowed by this one under a single mangled name.
    """
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]


# The original module exposed the character reader under this mangled name
# (the second definition shadowed the first); keep it for compatibility.
lowercase__ = get_character
719
"""Processor class for BridgeTower: bundles the image processor and tokenizer."""
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class __UpperCAmelCase(ProcessorMixin):
    """Wraps a ``BridgeTowerImageProcessor`` and a RoBERTa tokenizer into a
    single processor object.

    Bug fixes vs. the previous version: ``__init__`` and ``__call__`` declared
    every parameter with the same name (a SyntaxError), the three class
    attributes shared one name (so ``ProcessorMixin`` could not resolve them),
    and the base class referenced an undefined name.  Real names are restored;
    ``__call__`` defaults map one-to-one onto the previous default values.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        """Register both sub-processors on the ProcessorMixin base."""
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Tokenize ``text`` and preprocess ``images``, merging both outputs
        into one :class:`BatchEncoding`.

        Returns the tokenizer encoding extended with the image-processor
        outputs (pixel values / mask).
        """
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask.  NOTE(review): the previous (broken)
        # call forwarded do_normalize / do_center_crop flags explicitly; those
        # now travel through **kwargs — confirm against the image processor.
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, **kwargs
        )
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Combined, de-duplicated input names of tokenizer and image processor."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
85
0
"""simple docstring""" import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() a : Dict = logging.get_logger() def lowercase__(A , A , A , A , A = True ) ->Dict: """simple docstring""" print(f'''Converting {name}...''' ) with torch.no_grad(): if hidden_sizes == 128: if name[-1] == "S": lowercase__ : Optional[Any]= timm.create_model("levit_128s" , pretrained=UpperCamelCase__ ) else: lowercase__ : Dict= timm.create_model("levit_128" , pretrained=UpperCamelCase__ ) if hidden_sizes == 192: lowercase__ : Tuple= timm.create_model("levit_192" , pretrained=UpperCamelCase__ ) if hidden_sizes == 256: lowercase__ : Optional[int]= timm.create_model("levit_256" , pretrained=UpperCamelCase__ ) if hidden_sizes == 384: lowercase__ : Dict= timm.create_model("levit_384" , pretrained=UpperCamelCase__ ) from_model.eval() lowercase__ : Optional[Any]= LevitForImageClassificationWithTeacher(UpperCamelCase__ ).eval() lowercase__ : Tuple= OrderedDict() lowercase__ : Optional[Any]= from_model.state_dict() lowercase__ : str= list(from_model.state_dict().keys() ) lowercase__ : List[Any]= list(our_model.state_dict().keys() ) print(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) ) for i in range(len(UpperCamelCase__ ) ): lowercase__ : str= weights[og_keys[i]] our_model.load_state_dict(UpperCamelCase__ ) lowercase__ : int= torch.randn((2, 3, 224, 224) ) lowercase__ : Any= from_model(UpperCamelCase__ ) lowercase__ : List[Any]= our_model(UpperCamelCase__ ).logits assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ ), "The model logits don't match the original one." 
lowercase__ : Dict= name print(UpperCamelCase__ ) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name ) lowercase__ : Optional[int]= LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name ) print(f'''Pushed {checkpoint_name}''' ) def lowercase__(A , A = None , A = True ) ->Optional[int]: """simple docstring""" lowercase__ : Optional[Any]= """imagenet-1k-id2label.json""" lowercase__ : List[Any]= 1_000 lowercase__ : Dict= (1, num_labels) lowercase__ : List[Any]= """huggingface/label-files""" lowercase__ : Optional[int]= num_labels lowercase__ : List[str]= json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="dataset" ) , "r" ) ) lowercase__ : Any= {int(UpperCamelCase__ ): v for k, v in idalabel.items()} lowercase__ : List[Any]= idalabel lowercase__ : str= {v: k for k, v in idalabel.items()} lowercase__ : Tuple= partial(UpperCamelCase__ , num_labels=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ ) lowercase__ : Optional[int]= { """levit-128S""": 128, """levit-128""": 128, """levit-192""": 192, """levit-256""": 256, """levit-384""": 384, } lowercase__ : List[Any]= { """levit-128S""": ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), """levit-128""": ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), """levit-192""": ImageNetPreTrainedConfig( hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), """levit-256""": ImageNetPreTrainedConfig( hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), """levit-384""": ImageNetPreTrainedConfig( hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , 
key_dim=[32, 32, 32] , drop_path_rate=0.1 , ), } if model_name: convert_weight_and_push( names_to_hidden_sizes[model_name] , UpperCamelCase__ , names_to_config[model_name] , UpperCamelCase__ , UpperCamelCase__ ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name] , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return config, expected_shape if __name__ == "__main__": a : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""levit-dump-folder/""", type=Path, required=False, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") parser.add_argument( """--no-push_to_hub""", dest="""push_to_hub""", action="""store_false""", help="""Do not push model and image processor to the hub""", ) a : List[Any] = parser.parse_args() a : Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
720
"""simple docstring""" import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= tempfile.mkdtemp() lowercase__ : Optional[Any]= 8 # DPR tok lowercase__ : Tuple= [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] lowercase__ : Any= os.path.join(self.tmpdirname , "dpr_tokenizer" ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) lowercase__ : Any= os.path.join(snake_case__ , DPR_VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) # BART tok lowercase__ : List[Any]= [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] lowercase__ : Tuple= 
dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) lowercase__ : Any= ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] lowercase__ : Tuple= {"unk_token": "<unk>"} lowercase__ : int= os.path.join(self.tmpdirname , "bart_tokenizer" ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) lowercase__ : List[str]= os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES["vocab_file"] ) lowercase__ : str= os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(snake_case__ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(snake_case__ ) ) def UpperCAmelCase_ ( self ): '''simple docstring''' return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) ) def UpperCAmelCase_ ( self ): '''simple docstring''' return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) ) def UpperCAmelCase_ ( self ): '''simple docstring''' return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) ) def UpperCAmelCase_ ( self ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= Dataset.from_dict( { "id": ["0", "1"], "text": ["foo", "bar"], "title": ["Foo", "Bar"], "embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )], } ) dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT ) return dataset def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Union[str, Any]= self.get_dummy_dataset() lowercase__ : Optional[Any]= RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , ) with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset: lowercase__ : Tuple= dataset 
lowercase__ : Optional[int]= RagRetriever( snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) return retriever def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' lowercase__ : Dict= self.get_dummy_dataset() lowercase__ : Tuple= RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , ) if from_disk: lowercase__ : Tuple= os.path.join(self.tmpdirname , "dataset" ) lowercase__ : Optional[Any]= os.path.join(self.tmpdirname , "index.faiss" ) dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname , "index.faiss" ) ) dataset.drop_index("embeddings" ) dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset" ) ) del dataset lowercase__ : List[Any]= RagRetriever( snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) else: lowercase__ : Optional[int]= RagRetriever( snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , snake_case__ ) , ) return retriever def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[str]= Dataset.from_dict( { "id": ["0", "1"], "text": ["foo", "bar"], "title": ["Foo", "Bar"], "embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )], } ) dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT ) lowercase__ : Optional[int]= os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index" ) dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr" ) pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb" ) ) lowercase__ : int= os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl" ) lowercase__ : str= {sample["id"]: 
[sample["text"], sample["title"]] for sample in dataset} pickle.dump(snake_case__ , open(snake_case__ , "wb" ) ) lowercase__ : List[Any]= RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , ) lowercase__ : Optional[Any]= RagRetriever( snake_case__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() ) return retriever def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= 1 lowercase__ : Optional[Any]= self.get_dummy_canonical_hf_index_retriever() lowercase__ : Union[str, Any]= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__, lowercase__, lowercase__ : Optional[int]= retriever.retrieve(snake_case__ , n_docs=snake_case__ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(snake_case__ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] ) self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ ) self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[Any]= self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset: lowercase__ : Tuple= self.get_dummy_dataset() retriever.save_pretrained(snake_case__ ) lowercase__ : int= RagRetriever.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) lowercase__ : Any= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , 
dtype=np.floataa ) lowercase__ : Tuple= retriever.retrieve(snake_case__ , n_docs=1 ) self.assertTrue(out is not None ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[Any]= 1 lowercase__ : Any= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ ) lowercase__ : Union[str, Any]= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__, lowercase__, lowercase__ : Any= retriever.retrieve(snake_case__ , n_docs=snake_case__ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(snake_case__ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] ) self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ ) self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[Any]= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(snake_case__ ) lowercase__ : int= RagRetriever.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) lowercase__ : Tuple= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__ : str= retriever.retrieve(snake_case__ , n_docs=1 ) self.assertTrue(out is not None ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Any= 1 lowercase__ : str= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ ) lowercase__ : List[str]= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__, lowercase__, lowercase__ : Optional[int]= retriever.retrieve(snake_case__ , 
n_docs=snake_case__ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(snake_case__ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] ) self.assertEqual(len(doc_dicts[0]["id"] ) , snake_case__ ) self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Tuple= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(snake_case__ ) lowercase__ : Optional[Any]= RagRetriever.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) lowercase__ : int= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__ : Union[str, Any]= retriever.retrieve(snake_case__ , n_docs=1 ) self.assertTrue(out is not None ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= 1 lowercase__ : int= self.get_dummy_legacy_index_retriever() lowercase__ : Optional[Any]= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__, lowercase__, lowercase__ : Optional[Any]= retriever.retrieve(snake_case__ , n_docs=snake_case__ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(snake_case__ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["text", "title"] ) self.assertEqual(len(doc_dicts[0]["text"] ) , snake_case__ ) self.assertEqual(doc_dicts[0]["text"][0] , "bar" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["text"][0] , "foo" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , 
[[1], [0]] ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[int]= self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(snake_case__ ) lowercase__ : List[Any]= RagRetriever.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) lowercase__ : str= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__ : Tuple= retriever.retrieve(snake_case__ , n_docs=1 ) self.assertTrue(out is not None ) @require_torch @require_tokenizers @require_sentencepiece def UpperCAmelCase_ ( self ): '''simple docstring''' import torch lowercase__ : str= 1 lowercase__ : Union[str, Any]= self.get_dummy_canonical_hf_index_retriever() lowercase__ : str= [[5, 7], [10, 11]] lowercase__ : List[str]= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__ : Dict= retriever(snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ ) lowercase__, lowercase__, lowercase__ : Optional[int]= ( out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(snake_case__ , snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) self.assertIsInstance(snake_case__ , np.ndarray ) lowercase__ : Any= retriever( snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ , return_tensors="pt" , ) lowercase__, lowercase__, lowercase__, lowercase__ : Tuple= ( # noqa: F841 out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], out["doc_ids"], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(snake_case__ , torch.Tensor ) self.assertIsInstance(snake_case__ , torch.Tensor 
) self.assertIsInstance(snake_case__ , torch.Tensor ) @require_torch @require_tokenizers @require_sentencepiece def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[str]= self.get_dpr_ctx_encoder_tokenizer() lowercase__ : Dict= 1 lowercase__ : Any= self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ ) retriever.set_ctx_encoder_tokenizer(snake_case__ ) lowercase__ : List[str]= [[5, 7], [10, 11]] lowercase__ : Any= np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowercase__ : List[Any]= retriever(snake_case__ , snake_case__ , prefix=retriever.config.generator.prefix , n_docs=snake_case__ ) self.assertEqual( len(snake_case__ ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask") ) , snake_case__ ) # check for doc token related keys in dictionary.
85
0
"""Init for the Spectrogram Diffusion pipeline: imports real classes when the
optional dependencies are present, otherwise falls back to dummy objects."""
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable

try:
    # The encoders and the pipeline itself need torch + transformers.
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .notes_encoder import SpectrogramNotesEncoder
    from .continous_encoder import SpectrogramContEncoder
    # NOTE(review): `SpectrogramContEncoder` is imported again here and shadows
    # the binding from `.continous_encoder` above — presumably both modules
    # expose the same class; confirm before removing either import.
    from .pipeline_spectrogram_diffusion import (
        SpectrogramContEncoder,
        SpectrogramDiffusionPipeline,
        TaFilmDecoder,
    )

try:
    # MIDI support additionally requires note_seq.
    if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_transformers_and_torch_and_note_seq_objects import *  # noqa F403
else:
    from .midi_utils import MidiProcessor
721
"""Processor wrapping an auto image processor and an auto tokenizer."""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class __UpperCAmelCase(ProcessorMixin):
    """Combines an image processor and a tokenizer into a single processor.

    Bug fixes vs. the previous version: ``__init__`` and ``__call__`` declared
    duplicate parameter names (a SyntaxError); the computed ``pixel_values``
    were assigned to a throwaway local instead of merged into the encoding;
    the image-only branch built a ``BatchEncoding`` from ``**kwargs`` instead
    of the image features; and the three class attributes shared one name.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        """Register both sub-processors; default the active one to the image
        processor, mirroring the original assignment of ``self.image_processor``."""
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize ``text`` and/or preprocess ``images``.

        Returns a :class:`BatchEncoding` containing ``input_ids`` /
        ``attention_mask`` when text is given and ``pixel_values`` when images
        are given.  Raises ``ValueError`` when both are ``None``.
        """
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            # Merge image features into the text encoding (previously dropped).
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Names of all inputs this processor can produce."""
        return ["input_ids", "attention_mask", "pixel_values"]
85
0
"""simple docstring""" a : int = { """Pillow""": """Pillow""", """accelerate""": """accelerate>=0.11.0""", """compel""": """compel==0.1.8""", """black""": """black~=23.1""", """datasets""": """datasets""", """filelock""": """filelock""", """flax""": """flax>=0.4.1""", """hf-doc-builder""": """hf-doc-builder>=0.3.0""", """huggingface-hub""": """huggingface-hub>=0.13.2""", """requests-mock""": """requests-mock==1.10.0""", """importlib_metadata""": """importlib_metadata""", """invisible-watermark""": """invisible-watermark""", """isort""": """isort>=5.5.4""", """jax""": """jax>=0.2.8,!=0.3.2""", """jaxlib""": """jaxlib>=0.1.65""", """Jinja2""": """Jinja2""", """k-diffusion""": """k-diffusion>=0.0.12""", """torchsde""": """torchsde""", """note_seq""": """note_seq""", """librosa""": """librosa""", """numpy""": """numpy""", """omegaconf""": """omegaconf""", """parameterized""": """parameterized""", """protobuf""": """protobuf>=3.20.3,<4""", """pytest""": """pytest""", """pytest-timeout""": """pytest-timeout""", """pytest-xdist""": """pytest-xdist""", """ruff""": """ruff>=0.0.241""", """safetensors""": """safetensors""", """sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""", """scipy""": """scipy""", """onnx""": """onnx""", """regex""": """regex!=2019.12.17""", """requests""": """requests""", """tensorboard""": """tensorboard""", """torch""": """torch>=1.4""", """torchvision""": """torchvision""", """transformers""": """transformers>=4.25.1""", """urllib3""": """urllib3<=2.0.0""", }
700
"""Pure-Python Base64 encoding/decoding using the RFC 4648 alphabet."""

B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
# Historical module-level alias kept so existing references to `a` still work.
# (Bug fix: the charset was previously bound ONLY to `a`, so every call below
# raised NameError on `B64_CHARSET`.)
a: str = B64_CHARSET


def base64_encode(data: bytes) -> bytes:
    """Encode *data* to Base64 and return the encoded bytes.

    Raises:
        TypeError: if *data* is not a bytes object.
    """
    if not isinstance(data, bytes):
        raise TypeError(
            f"a bytes-like object is required, not '{data.__class__.__name__}'"
        )

    # One long bit string, 8 bits per input byte.
    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The '=' padding that will be appended to the output.
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)
        # Pad the bit stream with zeros up to a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Map every 6-bit group to its Base64 character.
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data) -> bytes:
    """Decode a Base64 string (or ASCII bytes) back to the original bytes.

    Raises:
        TypeError: for input that is neither ``bytes`` nor ``str``.
        ValueError: for bytes input that is not pure ASCII.
        AssertionError: for invalid Base64 characters or padding (kept as
            assertions to preserve the original exception type).
    """
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        raise TypeError(
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )

    # Normalise bytes input to str, requiring pure ASCII content.
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters.
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding.
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Drop the '=' characters, then drop the padding bits they stood for.
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    decoded_bytes = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(decoded_bytes)


# The original module defined both functions under one mangled name, so the
# public name ended up bound to the decoder; keep that final binding.
lowercase__ = base64_decode

if __name__ == "__main__":
    import doctest

    doctest.testmod()
85
0
"""simple docstring""" import unittest from transformers import GPTSwaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin a : str = get_tests_dir("""fixtures/test_sentencepiece_with_bytefallback.model""") @require_sentencepiece @require_tokenizers class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , unittest.TestCase ): """simple docstring""" __lowerCamelCase = GPTSwaTokenizer __lowerCamelCase = False __lowerCamelCase = True __lowerCamelCase = False def UpperCAmelCase_ ( self ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing lowercase__ : Optional[Any]= GPTSwaTokenizer(snake_case__ , eos_token="<unk>" , bos_token="<unk>" , pad_token="<unk>" ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' lowercase__ : int= "This is a test" lowercase__ : Optional[int]= "This is a test" return input_text, output_text def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[Any]= "<s>" lowercase__ : List[Any]= 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : int= list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<unk>" ) self.assertEqual(vocab_keys[1] , "<s>" ) self.assertEqual(vocab_keys[-1] , "j" ) self.assertEqual(len(snake_case__ ) , 2000 ) def UpperCAmelCase_ ( self ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 2000 ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Tuple= GPTSwaTokenizer(snake_case__ ) lowercase__ : int= tokenizer.tokenize("This is a test" ) self.assertListEqual(snake_case__ , ["▁This", "▁is", "▁a", "▁t", "est"] ) 
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , [465, 287, 265, 631, 842] ) lowercase__ : Dict= tokenizer.tokenize("I was born in 92000, and this is falsé." ) # fmt: off self.assertListEqual( snake_case__ , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] , ) # fmt: on lowercase__ : Any= tokenizer.convert_tokens_to_ids(snake_case__ ) self.assertListEqual( snake_case__ , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , ) lowercase__ : Dict= tokenizer.convert_ids_to_tokens(snake_case__ ) # fmt: off self.assertListEqual( snake_case__ , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] ) # fmt: on def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[Any]= GPTSwaTokenizer(snake_case__ ) lowercase__ : List[str]= ["This is a test", "I was born in 92000, and this is falsé."] lowercase__ : Union[str, Any]= [ [465, 287, 265, 631, 842], [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260], ] # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids for text, expected_ids in zip(snake_case__ , snake_case__ ): self.assertListEqual(tokenizer.encode_fast(snake_case__ ) , snake_case__ ) # Test that decode_fast returns the input text for text, token_ids in zip(snake_case__ , snake_case__ ): self.assertEqual(tokenizer.decode_fast(snake_case__ ) , snake_case__ ) @slow def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Tuple= [ "<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')", "Hey there, how are you doing this fine day?", "This is a text with a trailing spaces followed by a dot .", "Häj sväjs lillebrör! =)", "Det är inget fel på Mr. 
Cool", ] # fmt: off lowercase__ : Optional[int]= {"input_ids": [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: on self.tokenizer_integration_test_util( expected_encoding=snake_case__ , model_name="AI-Sweden/gpt-sw3-126m" , sequences=snake_case__ , )
701
"""simple docstring""" from __future__ import annotations def lowercase__(A ) ->list[int]: # This function is recursive """simple docstring""" lowercase__ : int= len(A ) # If the array contains only one element, we return it (it's the stop condition of # recursion) if array_length <= 1: return array # Else lowercase__ : str= array[0] lowercase__ : Optional[Any]= False lowercase__ : Any= 1 lowercase__ : list[int]= [] while not is_found and i < array_length: if array[i] < pivot: lowercase__ : Union[str, Any]= True lowercase__ : List[str]= [element for element in array[i:] if element >= array[i]] lowercase__ : Union[str, Any]= longest_subsequence(A ) if len(A ) > len(A ): lowercase__ : List[str]= temp_array else: i += 1 lowercase__ : List[str]= [element for element in array[1:] if element >= pivot] lowercase__ : List[str]= [pivot, *longest_subsequence(A )] if len(A ) > len(A ): return temp_array else: return longest_subseq if __name__ == "__main__": import doctest doctest.testmod()
85
0
"""simple docstring""" import re def lowercase__(A ) ->str: """simple docstring""" if len(re.findall("[ATCG]" , lowercase__ ) ) != len(lowercase__ ): raise ValueError("Invalid Strand" ) return dna.translate(dna.maketrans("ATCG" , "TAGC" ) ) if __name__ == "__main__": import doctest doctest.testmod()
702
"""simple docstring""" import argparse from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline if __name__ == "__main__": a : int = argparse.ArgumentParser() parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument( """--txt2img_unclip""", default="""kakaobrain/karlo-v1-alpha""", type=str, required=False, help="""The pretrained txt2img unclip.""", ) a : List[str] = parser.parse_args() a : List[str] = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip) a : Optional[Any] = CLIPImageProcessor() a : List[str] = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""") a : Tuple = UnCLIPImageVariationPipeline( decoder=txtaimg.decoder, text_encoder=txtaimg.text_encoder, tokenizer=txtaimg.tokenizer, text_proj=txtaimg.text_proj, feature_extractor=feature_extractor, image_encoder=image_encoder, super_res_first=txtaimg.super_res_first, super_res_last=txtaimg.super_res_last, decoder_scheduler=txtaimg.decoder_scheduler, super_res_scheduler=txtaimg.super_res_scheduler, ) imgaimg.save_pretrained(args.dump_path)
85
0
"""simple docstring""" import glob import os import random from string import ascii_lowercase, digits import cva import numpy as np # Parrameters a : int = (720, 1280) # Height, Width a : Union[str, Any] = (0.4, 0.6) # if height or width lower than this scale, drop it. a : List[str] = 1 / 100 a : List[Any] = '' a : Union[str, Any] = '' a : List[str] = '' a : Optional[int] = 250 def lowercase__() ->Union[str, Any]: """simple docstring""" lowercase__ : List[str]= get_dataset(lowerCAmelCase_ , lowerCAmelCase_ ) for index in range(lowerCAmelCase_ ): lowercase__ : Dict= random.sample(range(len(lowerCAmelCase_ ) ) , 4 ) lowercase__ : List[Any]= update_image_and_anno( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , filter_scale=lowerCAmelCase_ , ) # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' lowercase__ : Optional[Any]= random_chars(32 ) lowercase__ : List[Any]= path.split(os.sep )[-1].rsplit("." , 1 )[0] lowercase__ : str= f'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}''' cva.imwrite(f'''{file_root}.jpg''' , lowerCAmelCase_ , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(f'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' ) lowercase__ : Optional[int]= [] for anno in new_annos: lowercase__ : str= anno[3] - anno[1] lowercase__ : Any= anno[4] - anno[2] lowercase__ : Dict= anno[1] + width / 2 lowercase__ : Union[str, Any]= anno[2] + height / 2 lowercase__ : Union[str, Any]= f'''{anno[0]} {x_center} {y_center} {width} {height}''' annos_list.append(lowerCAmelCase_ ) with open(f'''{file_root}.txt''' , "w" ) as outfile: outfile.write("\n".join(line for line in annos_list ) ) def lowercase__(A , A ) ->Dict: """simple docstring""" lowercase__ : Dict= [] lowercase__ : str= [] for label_file in glob.glob(os.path.join(lowerCAmelCase_ , "*.txt" ) ): lowercase__ : Union[str, Any]= label_file.split(os.sep )[-1].rsplit("." 
, 1 )[0] with open(lowerCAmelCase_ ) as in_file: lowercase__ : int= in_file.readlines() lowercase__ : Union[str, Any]= os.path.join(lowerCAmelCase_ , f'''{label_name}.jpg''' ) lowercase__ : Any= [] for obj_list in obj_lists: lowercase__ : Dict= obj_list.rstrip("\n" ).split(" " ) lowercase__ : Union[str, Any]= float(obj[1] ) - float(obj[3] ) / 2 lowercase__ : str= float(obj[2] ) - float(obj[4] ) / 2 lowercase__ : Any= float(obj[1] ) + float(obj[3] ) / 2 lowercase__ : Optional[Any]= float(obj[2] ) + float(obj[4] ) / 2 boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] ) if not boxes: continue img_paths.append(lowerCAmelCase_ ) labels.append(lowerCAmelCase_ ) return img_paths, labels def lowercase__(A , A , A , A , A , A = 0.0 , ) ->Optional[int]: """simple docstring""" lowercase__ : Optional[Any]= np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta ) lowercase__ : Tuple= scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) lowercase__ : Dict= scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) lowercase__ : List[str]= int(scale_x * output_size[1] ) lowercase__ : List[Any]= int(scale_y * output_size[0] ) lowercase__ : List[Any]= [] lowercase__ : Any= [] for i, index in enumerate(lowerCAmelCase_ ): lowercase__ : Tuple= all_img_list[index] path_list.append(lowerCAmelCase_ ) lowercase__ : List[Any]= all_annos[index] lowercase__ : Optional[Any]= cva.imread(lowerCAmelCase_ ) if i == 0: # top-left lowercase__ : List[str]= cva.resize(lowerCAmelCase_ , (divid_point_x, divid_point_y) ) lowercase__ : str= img for bbox in img_annos: lowercase__ : List[Any]= bbox[1] * scale_x lowercase__ : str= bbox[2] * scale_y lowercase__ : Any= bbox[3] * scale_x lowercase__ : List[str]= bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 1: # top-right lowercase__ : List[Any]= cva.resize(lowerCAmelCase_ , (output_size[1] - divid_point_x, divid_point_y) ) lowercase__ : Union[str, Any]= img for bbox in img_annos: lowercase__ 
: int= scale_x + bbox[1] * (1 - scale_x) lowercase__ : List[str]= bbox[2] * scale_y lowercase__ : List[str]= scale_x + bbox[3] * (1 - scale_x) lowercase__ : Dict= bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 2: # bottom-left lowercase__ : Optional[int]= cva.resize(lowerCAmelCase_ , (divid_point_x, output_size[0] - divid_point_y) ) lowercase__ : Dict= img for bbox in img_annos: lowercase__ : int= bbox[1] * scale_x lowercase__ : Optional[int]= scale_y + bbox[2] * (1 - scale_y) lowercase__ : int= bbox[3] * scale_x lowercase__ : int= scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) else: # bottom-right lowercase__ : Union[str, Any]= cva.resize( lowerCAmelCase_ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) ) lowercase__ : Union[str, Any]= img for bbox in img_annos: lowercase__ : Union[str, Any]= scale_x + bbox[1] * (1 - scale_x) lowercase__ : Dict= scale_y + bbox[2] * (1 - scale_y) lowercase__ : str= scale_x + bbox[3] * (1 - scale_x) lowercase__ : str= scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) # Remove bounding box small than scale of filter if filter_scale > 0: lowercase__ : int= [ anno for anno in new_anno if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2]) ] return output_img, new_anno, path_list[0] def lowercase__(A ) ->Any: """simple docstring""" assert number_char > 1, "The number of character should greater than 1" lowercase__ : Dict= ascii_lowercase + digits return "".join(random.choice(lowerCAmelCase_ ) for _ in range(lowerCAmelCase_ ) ) if __name__ == "__main__": main() print("""DONE ✅""")
703
"""simple docstring""" import argparse import os from . import ( ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BART_PRETRAINED_MODEL_ARCHIVE_LIST, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, BartConfig, BertConfig, CamembertConfig, CTRLConfig, DistilBertConfig, DPRConfig, ElectraConfig, FlaubertConfig, GPTaConfig, LayoutLMConfig, LxmertConfig, OpenAIGPTConfig, RobertaConfig, TaConfig, TFAlbertForPreTraining, TFBartForConditionalGeneration, TFBartForSequenceClassification, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFCamembertForMaskedLM, TFCTRLLMHeadModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, TFElectraForPreTraining, TFFlaubertWithLMHeadModel, TFGPTaLMHeadModel, TFLayoutLMForMaskedLM, TFLxmertForPreTraining, TFLxmertVisualFeatureEncoder, TFOpenAIGPTLMHeadModel, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForSequenceClassification, TFTaForConditionalGeneration, TFTransfoXLLMHeadModel, TFWavaVecaModel, TFXLMRobertaForMaskedLM, TFXLMWithLMHeadModel, TFXLNetLMHeadModel, TransfoXLConfig, WavaVecaConfig, WavaVecaModel, XLMConfig, XLMRobertaConfig, XLNetConfig, is_torch_available, 
load_pytorch_checkpoint_in_tfa_model, ) from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging if is_torch_available(): import numpy as np import torch from . import ( AlbertForPreTraining, BartForConditionalGeneration, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, CamembertForMaskedLM, CTRLLMHeadModel, DistilBertForMaskedLM, DistilBertForQuestionAnswering, DPRContextEncoder, DPRQuestionEncoder, DPRReader, ElectraForPreTraining, FlaubertWithLMHeadModel, GPTaLMHeadModel, LayoutLMForMaskedLM, LxmertForPreTraining, LxmertVisualFeatureEncoder, OpenAIGPTLMHeadModel, RobertaForMaskedLM, RobertaForSequenceClassification, TaForConditionalGeneration, TransfoXLLMHeadModel, XLMRobertaForMaskedLM, XLMWithLMHeadModel, XLNetLMHeadModel, ) logging.set_verbosity_info() a : Optional[Any] = { """bart""": ( BartConfig, TFBartForConditionalGeneration, TFBartForSequenceClassification, BartForConditionalGeneration, BART_PRETRAINED_MODEL_ARCHIVE_LIST, ), """bert""": ( BertConfig, TFBertForPreTraining, BertForPreTraining, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-large-uncased-whole-word-masking-finetuned-squad""": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-large-cased-whole-word-masking-finetuned-squad""": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-base-cased-finetuned-mrpc""": ( BertConfig, TFBertForSequenceClassification, BertForSequenceClassification, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """dpr""": ( DPRConfig, TFDPRQuestionEncoder, TFDPRContextEncoder, TFDPRReader, DPRQuestionEncoder, DPRContextEncoder, DPRReader, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ), """gpt2""": ( GPTaConfig, TFGPTaLMHeadModel, GPTaLMHeadModel, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlnet""": ( XLNetConfig, 
TFXLNetLMHeadModel, XLNetLMHeadModel, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlm""": ( XLMConfig, TFXLMWithLMHeadModel, XLMWithLMHeadModel, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlm-roberta""": ( XLMRobertaConfig, TFXLMRobertaForMaskedLM, XLMRobertaForMaskedLM, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """transfo-xl""": ( TransfoXLConfig, TFTransfoXLLMHeadModel, TransfoXLLMHeadModel, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """openai-gpt""": ( OpenAIGPTConfig, TFOpenAIGPTLMHeadModel, OpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """roberta""": ( RobertaConfig, TFRobertaForCausalLM, TFRobertaForMaskedLM, RobertaForMaskedLM, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """layoutlm""": ( LayoutLMConfig, TFLayoutLMForMaskedLM, LayoutLMForMaskedLM, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, ), """roberta-large-mnli""": ( RobertaConfig, TFRobertaForSequenceClassification, RobertaForSequenceClassification, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """camembert""": ( CamembertConfig, TFCamembertForMaskedLM, CamembertForMaskedLM, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """flaubert""": ( FlaubertConfig, TFFlaubertWithLMHeadModel, FlaubertWithLMHeadModel, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """distilbert""": ( DistilBertConfig, TFDistilBertForMaskedLM, DistilBertForMaskedLM, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """distilbert-base-distilled-squad""": ( DistilBertConfig, TFDistilBertForQuestionAnswering, DistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """lxmert""": ( LxmertConfig, TFLxmertForPreTraining, LxmertForPreTraining, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """lxmert-visual-feature-encoder""": ( LxmertConfig, TFLxmertVisualFeatureEncoder, LxmertVisualFeatureEncoder, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """ctrl""": ( CTRLConfig, TFCTRLLMHeadModel, CTRLLMHeadModel, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """albert""": ( AlbertConfig, TFAlbertForPreTraining, AlbertForPreTraining, 
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """t5""": ( TaConfig, TFTaForConditionalGeneration, TaForConditionalGeneration, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """electra""": ( ElectraConfig, TFElectraForPreTraining, ElectraForPreTraining, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """wav2vec2""": ( WavaVecaConfig, TFWavaVecaModel, WavaVecaModel, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), } def lowercase__(A , A , A , A , A=False , A=True ) ->Union[str, Any]: """simple docstring""" if model_type not in MODEL_CLASSES: raise ValueError(f'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' ) lowercase__, lowercase__, lowercase__, lowercase__ : List[Any]= MODEL_CLASSES[model_type] # Initialise TF model if config_file in aws_config_map: lowercase__ : List[str]= cached_file(A , A , force_download=not use_cached_models ) lowercase__ : List[Any]= config_class.from_json_file(A ) lowercase__ : Any= True lowercase__ : List[str]= True print(f'''Building TensorFlow model from configuration: {config}''' ) lowercase__ : Optional[int]= model_class(A ) # Load weights from tf checkpoint if pytorch_checkpoint_path in aws_config_map.keys(): lowercase__ : List[str]= cached_file( A , A , force_download=not use_cached_models ) # Load PyTorch checkpoint in tf2 model: lowercase__ : Union[str, Any]= load_pytorch_checkpoint_in_tfa_model(A , A ) if compare_with_pt_model: lowercase__ : Any= tf_model(tf_model.dummy_inputs , training=A ) # build the network lowercase__ : Optional[Any]= torch.load(A , map_location="cpu" ) lowercase__ : Union[str, Any]= pt_model_class.from_pretrained( pretrained_model_name_or_path=A , config=A , state_dict=A ) with torch.no_grad(): lowercase__ : str= pt_model(**pt_model.dummy_inputs ) lowercase__ : Tuple= pto[0].numpy() lowercase__ : List[Any]= tfo[0].numpy() lowercase__ : Any= np.amax(np.abs(np_pt - np_tf ) ) print(f'''Max absolute difference between models outputs {diff}''' ) assert diff <= 2e-2, f'''Error, model absolute difference is 
>2e-2: {diff}''' # Save pytorch-model print(f'''Save TensorFlow model to {tf_dump_path}''' ) tf_model.save_weights(A , save_format="h5" ) def lowercase__(A , A , A=None , A=None , A=False , A=False , A=False , A=False , ) ->List[Any]: """simple docstring""" if args_model_type is None: lowercase__ : Tuple= list(MODEL_CLASSES.keys() ) else: lowercase__ : Optional[int]= [args_model_type] for j, model_type in enumerate(A , start=1 ): print("=" * 100 ) print(f''' Converting model type {j}/{len(A )}: {model_type}''' ) print("=" * 100 ) if model_type not in MODEL_CLASSES: raise ValueError(f'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' ) lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ : Optional[int]= MODEL_CLASSES[model_type] if model_shortcut_names_or_path is None: lowercase__ : int= list(aws_model_maps.keys() ) if config_shortcut_names_or_path is None: lowercase__ : Any= model_shortcut_names_or_path for i, (model_shortcut_name, config_shortcut_name) in enumerate( zip(A , A ) , start=1 ): print("-" * 100 ) if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name: if not only_convert_finetuned_models: print(f''' Skipping finetuned checkpoint {model_shortcut_name}''' ) continue lowercase__ : Any= model_shortcut_name elif only_convert_finetuned_models: print(f''' Skipping not finetuned checkpoint {model_shortcut_name}''' ) continue print( f''' Converting checkpoint {i}/{len(A )}: {model_shortcut_name} - model_type {model_type}''' ) print("-" * 100 ) if config_shortcut_name in aws_config_map: lowercase__ : List[str]= cached_file(A , A , force_download=not use_cached_models ) else: lowercase__ : Union[str, Any]= config_shortcut_name if model_shortcut_name in aws_model_maps: lowercase__ : str= cached_file(A , A , force_download=not use_cached_models ) else: lowercase__ : Any= model_shortcut_name if os.path.isfile(A ): lowercase__ : Dict= "converted_model" 
convert_pt_checkpoint_to_tf( model_type=A , pytorch_checkpoint_path=A , config_file=A , tf_dump_path=os.path.join(A , model_shortcut_name + "-tf_model.h5" ) , compare_with_pt_model=A , ) if remove_cached_files: os.remove(A ) os.remove(A ) if __name__ == "__main__": a : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file.""" ) parser.add_argument( """--model_type""", default=None, type=str, help=( F"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """ """convert all the models from AWS.""" ), ) parser.add_argument( """--pytorch_checkpoint_path""", default=None, type=str, help=( """Path to the PyTorch checkpoint path or shortcut name to download from AWS. """ """If not given, will download and convert all the checkpoints from AWS.""" ), ) parser.add_argument( """--config_file""", default=None, type=str, help=( """The config json file corresponding to the pre-trained model. \n""" """This specifies the model architecture. 
If not given and """ """--pytorch_checkpoint_path is not given or is a shortcut name """ """use the configuration associated to the shortcut name on the AWS""" ), ) parser.add_argument( """--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions.""" ) parser.add_argument( """--use_cached_models""", action="""store_true""", help="""Use cached models if possible instead of updating to latest checkpoint versions.""", ) parser.add_argument( """--remove_cached_files""", action="""store_true""", help="""Remove pytorch models after conversion (save memory when converting in batches).""", ) parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""") a : List[str] = parser.parse_args() # if args.pytorch_checkpoint_path is not None: # convert_pt_checkpoint_to_tf(args.model_type.lower(), # args.pytorch_checkpoint_path, # args.config_file if args.config_file is not None else args.pytorch_checkpoint_path, # args.tf_dump_path, # compare_with_pt_model=args.compare_with_pt_model, # use_cached_models=args.use_cached_models) # else: convert_all_pt_checkpoints_to_tf( args.model_type.lower() if args.model_type is not None else None, args.tf_dump_path, model_shortcut_names_or_path=[args.pytorch_checkpoint_path] if args.pytorch_checkpoint_path is not None else None, config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None, compare_with_pt_model=args.compare_with_pt_model, use_cached_models=args.use_cached_models, remove_cached_files=args.remove_cached_files, only_convert_finetuned_models=args.only_convert_finetuned_models, )
85
0
"""simple docstring""" import json import os import tempfile from transformers.testing_utils import check_json_file_has_correct_format class __UpperCAmelCase: __lowerCamelCase = None def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[Any]= self.feature_extraction_class(**self.feat_extract_dict ) lowercase__ : Dict= json.loads(feat_extract.to_json_string() ) for key, value in self.feat_extract_dict.items(): self.assertEqual(obj[key] , lowercase__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Any= self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: lowercase__ : Dict= os.path.join(lowercase__ , "feat_extract.json" ) feat_extract_first.to_json_file(lowercase__ ) lowercase__ : Any= self.feature_extraction_class.from_json_file(lowercase__ ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[Any]= self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: lowercase__ : Optional[Any]= feat_extract_first.save_pretrained(lowercase__ )[0] check_json_file_has_correct_format(lowercase__ ) lowercase__ : List[Any]= self.feature_extraction_class.from_pretrained(lowercase__ ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[int]= self.feature_extraction_class() self.assertIsNotNone(lowercase__ )
704
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import _LazyModule a : List[str] = {"""processing_wav2vec2_with_lm""": ["""Wav2Vec2ProcessorWithLM"""]} if TYPE_CHECKING: from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM else: import sys a : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
85
0
# TensorFlow utility helpers (shape handling, masking, HDF5 attributes).
# NOTE(review): this chunk is machine-mangled — every function is named
# `lowercase__` (later defs shadow earlier ones), every local is assigned to
# `lowercase__`, and argument references appear as the undefined placeholder
# `a_`.  Code is kept byte-identical; comments only describe apparent intent.
from typing import List, Optional, Union

import numpy as np
import tensorflow as tf

from .utils import logging


a : Any = logging.get_logger(__name__)


# Apparent intent: return a tensor's shape as a Python list, mixing static
# dims with dynamic ones where the static dim is None.
def lowercase__(A ) ->Optional[Any]:
    """simple docstring"""
    if isinstance(a_ , np.ndarray ):
        return list(tensor.shape )

    lowercase__ : int= tf.shape(a_ )

    if tensor.shape == tf.TensorShape(a_ ):
        return dynamic

    lowercase__ : Union[str, Any]= tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(a_ )]


# Apparent intent: numerically-stabilised softmax (adds 1e-9 to the logits).
def lowercase__(A , A = None , A = None ) ->int:
    """simple docstring"""
    return tf.nn.softmax(logits=logits + 1e-9 , axis=a_ , name=a_ )


# Apparent intent: functional layer norm over one axis via
# tf.nn.batch_normalization, reshaping 1-D weight/bias when axis != -1.
def lowercase__(A , A , A , A=1e-5 , A=-1 ) ->str:
    """simple docstring"""
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(a_ , a_ ):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis." )

    # Get mean and variance on the axis to be normalized
    lowercase__ : Any= tf.nn.moments(a_ , axes=[axis] , keepdims=a_ )

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        lowercase__ : Dict= [1] * inputs.shape.rank
        lowercase__ : Tuple= shape_list(a_ )[axis]
        lowercase__ : List[Any]= tf.reshape(a_ , a_ )
        lowercase__ : str= tf.reshape(a_ , a_ )

    # Compute layer normalization using the batch_normalization
    # function.
    lowercase__ : Dict= tf.nn.batch_normalization(
        a_ , a_ , a_ , offset=a_ , scale=a_ , variance_epsilon=a_ , )
    return outputs


# Apparent intent: flatten dims [start_dim, end_dim] into one (torch-style
# flatten), supporting negative dim indices.
def lowercase__(A , A=0 , A=-1 ) ->Optional[int]:
    """simple docstring"""
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    lowercase__ : str= tf.shape(a_ )
    lowercase__ : int= tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
    lowercase__ : int= tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
    return tf.reshape(a_ , a_ )


# Apparent intent: broadcast a 2-D/3-D attention mask to 4-D and convert
# 1->0 / 0->dtype.min (additive mask).
def lowercase__(A ) ->str:
    """simple docstring"""
    if not isinstance(a_ , tf.Tensor ):
        lowercase__ : Optional[int]= tf.convert_to_tensor(a_ )  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        lowercase__ : int= encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        lowercase__ : List[Any]= encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    lowercase__ : Tuple= (
        tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask


# Apparent intent: assert every id is below the embedding matrix size,
# with a readable error mentioning the tensor name.
def lowercase__(A , A , A = "input_ids" ) ->List[Any]:
    """simple docstring"""
    tf.debugging.assert_less(
        a_ , tf.cast(a_ , dtype=tensor.dtype ) , message=(
            f'''The maximum value of {tensor_name} ({tf.math.reduce_max(a_ )}) must be smaller than the embedding '''
            f'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'''
        ) , )


# Apparent intent: write a (possibly huge) attribute list to an HDF5 group,
# chunking it to stay under the 64512-byte object-header limit.
def lowercase__(A , A , A ) ->Dict:
    """simple docstring"""
    lowercase__ : Optional[Any]= 64_512

    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    lowercase__ : str= [x for x in data if len(a_ ) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} '''
            f'''bytes: {bad_attributes}''' )

    lowercase__ : Optional[int]= np.asarray(a_ )

    lowercase__ : Union[str, Any]= 1
    lowercase__ : Tuple= np.array_split(a_ , a_ )

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
        num_chunks += 1
        lowercase__ : List[str]= np.array_split(a_ , a_ )

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(a_ ):
            lowercase__ : Any= chunk_data
    else:
        lowercase__ : List[Any]= data


# Apparent intent: inverse of the saver above — reassemble a chunked HDF5
# attribute list, decoding utf8 byte strings.
def lowercase__(A , A ) ->Dict:
    """simple docstring"""
    if name in group.attrs:
        lowercase__ : Union[str, Any]= [n.decode("utf8" ) if hasattr(a_ , "decode" ) else n for n in group.attrs[name]]
    else:
        lowercase__ : str= []
        lowercase__ : List[Any]= 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8" ) if hasattr(a_ , "decode" ) else n for n in group.attrs["%s%d" % (name, chunk_id)]] )
            chunk_id += 1
    return data


# Apparent intent: expand every rank-1 tensor in a nested structure to
# rank 2 by appending a trailing axis.
def lowercase__(A ) ->Optional[Any]:
    """simple docstring"""
    def _expand_single_ad_tensor(A ):
        if isinstance(a_ , tf.Tensor ) and t.shape.rank == 1:
            return tf.expand_dims(a_ , axis=-1 )
        return t

    return tf.nest.map_structure(_expand_single_ad_tensor , a_ )
705
"""simple docstring""" def lowercase__(A ) ->list: """simple docstring""" if n_term == "": return [] lowercase__ : list= [] for temp in range(int(A ) ): series.append(f'''1/{temp + 1}''' if series else "1" ) return series if __name__ == "__main__": a : Dict = input("""Enter the last number (nth term) of the Harmonic Series""") print("""Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n""") print(harmonic_series(nth_term))
85
0
"""simple docstring""" def lowercase__() ->Optional[int]: """simple docstring""" for n in range(1 , 1_000_000 ): yield n * (n + 1) // 2 def lowercase__(A ) ->Tuple: """simple docstring""" lowercase__ : Optional[int]= 1 lowercase__ : List[Any]= 2 while i * i <= n: lowercase__ : Union[str, Any]= 0 while n % i == 0: n //= i multiplicity += 1 divisors_count *= multiplicity + 1 i += 1 if n > 1: divisors_count *= 2 return divisors_count def lowercase__() ->List[Any]: """simple docstring""" return next(i for i in triangle_number_generator() if count_divisors(A ) > 500 ) if __name__ == "__main__": print(solution())
706
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a : int = logging.get_logger(__name__) a : str = { """google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""", """google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""", """google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""", # See all BigBird models at https://huggingface.co/models?filter=big_bird } class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = "big_bird" def __init__( self , snake_case__=50358 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu_new" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=4096 , snake_case__=2 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=True , snake_case__=0 , snake_case__=1 , snake_case__=2 , snake_case__=66 , snake_case__="block_sparse" , snake_case__=True , snake_case__=False , snake_case__=64 , snake_case__=3 , snake_case__=None , **snake_case__ , ): '''simple docstring''' super().__init__( pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , sep_token_id=snake_case__ , **snake_case__ , ) lowercase__ : Dict= vocab_size lowercase__ : Optional[int]= max_position_embeddings lowercase__ : List[Any]= hidden_size lowercase__ : List[str]= num_hidden_layers lowercase__ : List[str]= num_attention_heads lowercase__ : Optional[int]= intermediate_size lowercase__ : Optional[int]= hidden_act lowercase__ : Tuple= hidden_dropout_prob lowercase__ : int= attention_probs_dropout_prob lowercase__ : int= initializer_range lowercase__ : List[Any]= type_vocab_size lowercase__ : Union[str, Any]= layer_norm_eps lowercase__ : Optional[Any]= 
use_cache lowercase__ : Union[str, Any]= rescale_embeddings lowercase__ : Union[str, Any]= attention_type lowercase__ : Any= use_bias lowercase__ : List[Any]= block_size lowercase__ : Optional[Any]= num_random_blocks lowercase__ : Optional[int]= classifier_dropout class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" @property def UpperCAmelCase_ ( self ): '''simple docstring''' if self.task == "multiple-choice": lowercase__ : List[Any]= {0: "batch", 1: "choice", 2: "sequence"} else: lowercase__ : Tuple= {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
85
0
"""simple docstring""" def lowercase__(A ) ->str: """simple docstring""" if not all(char in "01" for char in bin_string ): raise ValueError("Non-binary value was passed to the function" ) if not bin_string: raise ValueError("Empty string was passed to the function" ) lowercase__ : Any= "" while len(lowercase_ ) % 3 != 0: lowercase__ : Any= "0" + bin_string lowercase__ : Optional[Any]= [ bin_string[index : index + 3] for index in range(len(lowercase_ ) ) if index % 3 == 0 ] for bin_group in bin_string_in_3_list: lowercase__ : str= 0 for index, val in enumerate(lowercase_ ): oct_val += int(2 ** (2 - index) * int(lowercase_ ) ) oct_string += str(lowercase_ ) return oct_string if __name__ == "__main__": from doctest import testmod testmod()
707
"""simple docstring""" from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
85
0
import torch

from diffusers import DDPMScheduler

from .test_schedulers import SchedulerCommonTest


class DDPMSchedulerTest(SchedulerCommonTest):
    """Unit tests for `DDPMScheduler` (config handling, variance, full denoise loops)."""

    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        """Return a default DDPM config dict, overridden by `kwargs`."""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
708
"""simple docstring""" from unittest.mock import patch import pyspark from datasets.packaged_modules.spark.spark import ( Spark, SparkExamplesIterable, _generate_iterable_examples, ) from ..utils import ( require_dill_gt_0_3_2, require_not_windows, ) def lowercase__(A , A ) ->List[Any]: """simple docstring""" lowercase__ : str= [] for part_id in partition_order: lowercase__ : int= df.where(f'''SPARK_PARTITION_ID() = {part_id}''' ).collect() for row_idx, row in enumerate(A ): expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()) ) return expected_row_ids_and_row_dicts @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->str: """simple docstring""" lowercase__ : Optional[Any]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Tuple= spark.range(100 ).repartition(1 ) lowercase__ : Dict= Spark(A ) # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means # that each partition can hold 2 rows. spark_builder._repartition_df_if_needed(max_shard_size=16 ) # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions. assert spark_builder.df.rdd.getNumPartitions() == 50 @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->Tuple: """simple docstring""" lowercase__ : Union[str, Any]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Dict= spark.range(10 ).repartition(2 ) lowercase__ : Optional[Any]= [1, 0] lowercase__ : List[str]= _generate_iterable_examples(A , A ) # Reverse the partitions. 
lowercase__ : int= _get_expected_row_ids_and_row_dicts_for_partition_order(A , A ) for i, (row_id, row_dict) in enumerate(generate_fn() ): lowercase__, lowercase__ : Any= expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->int: """simple docstring""" lowercase__ : int= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Dict= spark.range(10 ).repartition(1 ) lowercase__ : str= SparkExamplesIterable(A ) assert it.n_shards == 1 for i, (row_id, row_dict) in enumerate(A ): assert row_id == f'''0_{i}''' assert row_dict == {"id": i} @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->str: """simple docstring""" lowercase__ : List[str]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : int= spark.range(30 ).repartition(3 ) # Mock the generator so that shuffle reverses the partition indices. 
with patch("numpy.random.Generator" ) as generator_mock: lowercase__ : Optional[Any]= lambda A : x.reverse() lowercase__ : Tuple= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [2, 1, 0] ) lowercase__ : List[str]= SparkExamplesIterable(A ).shuffle_data_sources(A ) assert shuffled_it.n_shards == 3 for i, (row_id, row_dict) in enumerate(A ): lowercase__, lowercase__ : str= expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->Any: """simple docstring""" lowercase__ : Dict= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Union[str, Any]= spark.range(20 ).repartition(4 ) # Partitions 0 and 2 lowercase__ : Optional[int]= SparkExamplesIterable(A ).shard_data_sources(worker_id=0 , num_workers=2 ) assert shard_it_a.n_shards == 2 lowercase__ : Union[str, Any]= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [0, 2] ) for i, (row_id, row_dict) in enumerate(A ): lowercase__, lowercase__ : Tuple= expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict # Partitions 1 and 3 lowercase__ : Tuple= SparkExamplesIterable(A ).shard_data_sources(worker_id=1 , num_workers=2 ) assert shard_it_a.n_shards == 2 lowercase__ : List[Any]= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [1, 3] ) for i, (row_id, row_dict) in enumerate(A ): lowercase__, lowercase__ : Dict= expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->Tuple: """simple docstring""" lowercase__ : Any= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Tuple= spark.range(100 ).repartition(1 ) lowercase__ : Optional[int]= Spark(A ) # Choose a small max_shard_size for maximum partitioning. 
spark_builder._repartition_df_if_needed(max_shard_size=1 ) # The new number of partitions should not be greater than the number of rows. assert spark_builder.df.rdd.getNumPartitions() == 100
85
0
import argparse
import ast
import logging
import os
import sys

import pandas as pd
import torch
from tqdm import tqdm

from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging


sys.path.append(os.path.join(os.getcwd()))  # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score  # noqa: E402 # isort:skip


logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)

transformers_logging.set_verbosity_info()


def infer_model_type(model_name_or_path):
    """Guess the model type ("rag_token", "rag_sequence" or "bart") from its name."""
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    """Return the best metric value of `prediction` against any ground truth."""
    return max(metric_fn(prediction, gt) for gt in ground_truths)


def get_scores(args, preds_path, gold_data_path):
    """Compute and log EM / F1 of the predictions against the gold answers."""
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    fa = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        fa += metric_max_over_ground_truths(fa_score, prediction, ground_truths)

    em = 100.0 * em / total
    fa = 100.0 * fa / total

    logger.info(f'''F1: {fa:.2f}''')
    logger.info(f'''EM: {em:.2f}''')


def get_precision_at_k(args, preds_path, gold_data_path):
    """Compute and log precision@k of retrieved provenance strings."""
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f'''Precision@{k}: {em: .2f}''')


def evaluate_batch_retrieval(args, rag_model, questions):
    """Return one tab-joined string of retrieved document titles per question."""

    def strip_title(title):
        # Titles in the index may be wrapped in double quotes.
        if title.startswith("\""):
            title = title[1:]
        if title.endswith("\""):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings


def evaluate_batch_e2e(args, rag_model, questions):
    """Generate one answer string per question with the (RAG or BART) model."""
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],  # BART likes to repeat BOS tokens, dont allow it to generate more than one
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers


def get_args():
    """Build and parse the command-line arguments; also sets `args.device`."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token", "bart"],
        type=str,
        help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name",
        default=None,
        choices=["exact", "compressed", "legacy"],
        type=str,
        help="RAG model retriever type",
    )
    parser.add_argument(
        "--index_path",
        default=None,
        type=str,
        help="Path to the retrieval index",
    )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode",
        choices=["e2e", "retrieval"],
        default="e2e",
        type=str,
        help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set",
        default=None,
        type=str,
        required=True,
        help="Path to a file containing evaluation samples",
    )
    parser.add_argument(
        "--gold_data_path",
        default=None,
        type=str,
        required=True,
        help="Path to a tab-separated file with gold samples",
    )
    parser.add_argument(
        "--gold_data_mode",
        default="qa",
        type=str,
        choices=["qa", "ans"],
        help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ),
    )
    parser.add_argument(
        "--predictions_path",
        type=str,
        default="predictions.txt",
        help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument(
        "--eval_batch_size",
        default=8,
        type=int,
        help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument(
        "--recalculate",
        help="Recalculate predictions even if the prediction file exists",
        action="store_true",
    )
    parser.add_argument(
        "--num_beams",
        default=4,
        type=int,
        help="Number of beams to be used when generating answers",
    )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")

    parser.add_argument(
        "--print_predictions",
        action="store_true",
        help="If True, prints predictions while evaluating.",
    )
    parser.add_argument(
        "--print_docs",
        action="store_true",
        help="If True, prints docs retried while generating.",
    )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args


def main(args):
    """Evaluate each checkpoint end-to-end (EM/F1) or by retrieval precision@k."""
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)


if __name__ == "__main__":
    args = get_args()
    main(args)
709
"""simple docstring""" import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=False , snake_case__=False , snake_case__=False , snake_case__=2 , snake_case__=99 , snake_case__=0 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=12 , snake_case__=2 , snake_case__=0.02 , snake_case__=3 , snake_case__=4 , snake_case__="last" , snake_case__=None , snake_case__=None , ): '''simple docstring''' lowercase__ : Optional[int]= parent lowercase__ : Tuple= batch_size lowercase__ : Tuple= seq_length lowercase__ : str= is_training lowercase__ : str= use_input_lengths lowercase__ : Any= use_token_type_ids lowercase__ : List[Any]= use_labels lowercase__ : Optional[int]= gelu_activation lowercase__ : str= sinusoidal_embeddings lowercase__ : List[str]= causal lowercase__ : Any= asm lowercase__ : Optional[int]= n_langs lowercase__ : Union[str, Any]= vocab_size lowercase__ : int= n_special lowercase__ : Any= hidden_size lowercase__ : int= num_hidden_layers lowercase__ : 
List[str]= num_attention_heads lowercase__ : List[str]= hidden_dropout_prob lowercase__ : str= attention_probs_dropout_prob lowercase__ : Any= max_position_embeddings lowercase__ : List[Any]= type_vocab_size lowercase__ : int= type_sequence_label_size lowercase__ : Any= initializer_range lowercase__ : Optional[int]= num_labels lowercase__ : Union[str, Any]= num_choices lowercase__ : List[Any]= summary_type lowercase__ : Optional[int]= use_proj lowercase__ : int= scope def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__ : Dict= random_attention_mask([self.batch_size, self.seq_length] ) lowercase__ : Tuple= None if self.use_input_lengths: lowercase__ : List[Any]= ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length lowercase__ : Tuple= None if self.use_token_type_ids: lowercase__ : Any= ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) lowercase__ : str= None lowercase__ : Tuple= None lowercase__ : Dict= None if self.use_labels: lowercase__ : Optional[Any]= ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ : Optional[Any]= ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase__ : Tuple= ids_tensor([self.batch_size] , 2 ).float() lowercase__ : Tuple= ids_tensor([self.batch_size] , self.num_choices ) lowercase__ : List[Any]= self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def UpperCAmelCase_ ( self ): '''simple docstring''' return FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , 
sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : Any= FlaubertModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : str= model(snake_case__ , lengths=snake_case__ , langs=snake_case__ ) lowercase__ : str= model(snake_case__ , langs=snake_case__ ) lowercase__ : Any= model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : str= FlaubertWithLMHeadModel(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : Optional[Any]= model(snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : str= FlaubertForQuestionAnsweringSimple(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : List[str]= model(snake_case__ ) lowercase__ : Dict= model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) 
) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : List[Any]= FlaubertForQuestionAnswering(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : Dict= model(snake_case__ ) lowercase__ : Any= model( snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , p_mask=snake_case__ , ) lowercase__ : List[str]= model( snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , ) ((lowercase__), ) : Optional[Any]= result_with_labels.to_tuple() lowercase__ : Union[str, Any]= model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ ) ((lowercase__), ) : List[Any]= result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : List[str]= FlaubertForSequenceClassification(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : Optional[Any]= model(snake_case__ ) lowercase__ : Optional[Any]= model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.loss.shape , () ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : List[Any]= self.num_labels lowercase__ : Union[str, Any]= FlaubertForTokenClassification(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : int= model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : int= self.num_choices lowercase__ : str= FlaubertForMultipleChoice(config=snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : Dict= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ : int= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ : str= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ : Any= model( snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[Any]= self.prepare_config_and_inputs() ( ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ) : Any= config_and_inputs lowercase__ : Tuple= { "input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths, "attention_mask": input_mask, } return config, inputs_dict @require_torch class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , 
SCREAMING_SNAKE_CASE__ , unittest.TestCase ): """simple docstring""" __lowerCamelCase = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) __lowerCamelCase = ( { "feature-extraction": FlaubertModel, "fill-mask": FlaubertWithLMHeadModel, "question-answering": FlaubertForQuestionAnsweringSimple, "text-classification": FlaubertForSequenceClassification, "token-classification": FlaubertForTokenClassification, "zero-shot": FlaubertForSequenceClassification, } if is_torch_available() else {} ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__=False ): '''simple docstring''' lowercase__ : Tuple= super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": lowercase__ : List[Any]= torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=snake_case__ ) lowercase__ : List[str]= torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=snake_case__ ) return inputs_dict def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= FlaubertModelTester(self ) lowercase__ : List[str]= ConfigTester(self , config_class=snake_case__ , emb_dim=37 ) def UpperCAmelCase_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Any= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[int]= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Tuple= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[str]= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Union[str, Any]= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*snake_case__ ) def UpperCAmelCase_ ( self ): 
'''simple docstring''' lowercase__ : Tuple= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*snake_case__ ) @slow def UpperCAmelCase_ ( self ): '''simple docstring''' for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : List[str]= FlaubertModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) @slow @require_torch_gpu def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__, lowercase__ : Optional[Any]= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. if model_class == FlaubertForMultipleChoice: return lowercase__ : int= True lowercase__ : List[Any]= model_class(config=snake_case__ ) lowercase__ : str= self._prepare_for_class(snake_case__ , snake_case__ ) lowercase__ : Dict= torch.jit.trace( snake_case__ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(snake_case__ , os.path.join(snake_case__ , "traced_model.pt" ) ) lowercase__ : str= torch.jit.load(os.path.join(snake_case__ , "traced_model.pt" ) , map_location=snake_case__ ) loaded(inputs_dict["input_ids"].to(snake_case__ ) , inputs_dict["attention_mask"].to(snake_case__ ) ) @require_torch class __UpperCAmelCase( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= FlaubertModel.from_pretrained("flaubert/flaubert_base_cased" ) lowercase__ : Tuple= torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) with torch.no_grad(): lowercase__ : Optional[int]= model(snake_case__ )[0] lowercase__ : Optional[int]= torch.Size((1, 11, 768) 
) self.assertEqual(output.shape , snake_case__ ) lowercase__ : Dict= torch.tensor( [[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1e-4 ) )
85
0
"""simple docstring""" def lowercase__(A , A ): """simple docstring""" lowercase__ : Union[str, Any]= """""" for i in table: res += inp[i - 1] return res def lowercase__(A ): """simple docstring""" return data[1:] + data[0] def lowercase__(A , A ): """simple docstring""" lowercase__ : Dict= """""" for i in range(len(snake_case_ ) ): if a[i] == b[i]: res += "0" else: res += "1" return res def lowercase__(A , A ): """simple docstring""" lowercase__ : int= int("0b" + data[0] + data[-1] , 2 ) lowercase__ : Any= int("0b" + data[1:3] , 2 ) return bin(s[row][col] )[2:] def lowercase__(A , A , A , A , A ): """simple docstring""" lowercase__ : List[str]= message[:4] lowercase__ : List[Any]= message[4:] lowercase__ : Union[str, Any]= apply_table(snake_case_ , snake_case_ ) lowercase__ : List[Any]= xor(snake_case_ , snake_case_ ) lowercase__ : Optional[Any]= apply_sbox(snake_case_ , temp[:4] ) # noqa: E741 lowercase__ : List[Any]= apply_sbox(snake_case_ , temp[4:] ) lowercase__ : int= """0""" * (2 - len(snake_case_ )) + l # noqa: E741 lowercase__ : Union[str, Any]= """0""" * (2 - len(snake_case_ )) + r lowercase__ : List[Any]= apply_table(l + r , snake_case_ ) lowercase__ : Any= xor(snake_case_ , snake_case_ ) return temp + right if __name__ == "__main__": a : Union[str, Any] = input("""Enter 10 bit key: """) a : List[Any] = input("""Enter 8 bit message: """) a : Union[str, Any] = [6, 3, 7, 4, 8, 5, 10, 9] a : Optional[int] = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6] a : int = [2, 4, 3, 1] a : Tuple = [2, 6, 3, 1, 4, 8, 5, 7] a : Any = [4, 1, 3, 5, 7, 2, 8, 6] a : str = [4, 1, 2, 3, 2, 3, 4, 1] a : int = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]] a : List[Any] = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]] # key generation a : int = apply_table(key, paa_table) a : Optional[int] = temp[:5] a : str = temp[5:] a : Any = left_shift(left) a : Any = left_shift(right) a : List[str] = apply_table(left + right, pa_table) a : int = left_shift(left) a : Optional[int] = 
left_shift(right) a : Optional[Any] = left_shift(left) a : Dict = left_shift(right) a : Optional[int] = apply_table(left + right, pa_table) # encryption a : Any = apply_table(message, IP) a : Union[str, Any] = function(expansion, sa, sa, keya, temp) a : Dict = temp[4:] + temp[:4] a : Optional[int] = function(expansion, sa, sa, keya, temp) a : Optional[int] = apply_table(temp, IP_inv) print("""Cipher text is:""", CT) # decryption a : Any = apply_table(CT, IP) a : str = function(expansion, sa, sa, keya, temp) a : Optional[Any] = temp[4:] + temp[:4] a : Optional[int] = function(expansion, sa, sa, keya, temp) a : List[str] = apply_table(temp, IP_inv) print("""Plain text after decypting is:""", PT)
710
"""simple docstring""" from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = 42 __lowerCamelCase = 42 __lowerCamelCase = None class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = 2 @register_to_config def __init__( self , snake_case__ = 0.02 , snake_case__ = 100 , snake_case__ = 1.0_07 , snake_case__ = 80 , snake_case__ = 0.05 , snake_case__ = 50 , ): '''simple docstring''' # standard deviation of the initial noise distribution lowercase__ : int= sigma_max # setable values lowercase__ : int= None lowercase__ : np.IntTensor= None lowercase__ : torch.FloatTensor= None # sigma(t_i) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' return sample def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' lowercase__ : List[Any]= num_inference_steps lowercase__ : Any= np.arange(0 , self.num_inference_steps )[::-1].copy() lowercase__ : Tuple= torch.from_numpy(snake_case__ ).to(snake_case__ ) lowercase__ : Union[str, Any]= [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in self.timesteps ] lowercase__ : int= torch.tensor(snake_case__ , dtype=torch.floataa , device=snake_case__ ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ = None ): '''simple docstring''' if self.config.s_min <= sigma <= self.config.s_max: lowercase__ : Optional[Any]= min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 ) else: lowercase__ : str= 0 # sample eps ~ N(0, S_noise^2 * I) lowercase__ : List[Any]= self.config.s_noise * 
randn_tensor(sample.shape , generator=snake_case__ ).to(sample.device ) lowercase__ : str= sigma + gamma * sigma lowercase__ : Any= sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = True , ): '''simple docstring''' lowercase__ : Union[str, Any]= sample_hat + sigma_hat * model_output lowercase__ : Optional[int]= (sample_hat - pred_original_sample) / sigma_hat lowercase__ : Optional[Any]= sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=snake_case__ , derivative=snake_case__ , pred_original_sample=snake_case__ ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = True , ): '''simple docstring''' lowercase__ : int= sample_prev + sigma_prev * model_output lowercase__ : Optional[int]= (sample_prev - pred_original_sample) / sigma_prev lowercase__ : Optional[Any]= sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=snake_case__ , derivative=snake_case__ , pred_original_sample=snake_case__ ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' raise NotImplementedError()
85
0
"""simple docstring""" from binascii import hexlify from hashlib import shaaaa from os import urandom # RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for # Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526 a : Union[str, Any] = { # 1536-bit 5: { 'prime': int( """FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1""" + """29024E088A67CC74020BBEA63B139B22514A08798E3404DD""" + """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245""" + """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED""" + """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D""" + """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F""" + """83655D23DCA3AD961C62F356208552BB9ED529077096966D""" + """670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF""", base=16, ), 'generator': 2, }, # 2048-bit 14: { 'prime': int( """FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1""" + """29024E088A67CC74020BBEA63B139B22514A08798E3404DD""" + """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245""" + """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED""" + """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D""" + """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F""" + """83655D23DCA3AD961C62F356208552BB9ED529077096966D""" + """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B""" + """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9""" + """DE2BCBF6955817183995497CEA956AE515D2261898FA0510""" + """15728E5A8AACAA68FFFFFFFFFFFFFFFF""", base=16, ), 'generator': 2, }, # 3072-bit 15: { 'prime': int( """FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1""" + """29024E088A67CC74020BBEA63B139B22514A08798E3404DD""" + """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245""" + """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED""" + """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D""" + """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F""" + """83655D23DCA3AD961C62F356208552BB9ED529077096966D""" + """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B""" + """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9""" + 
"""DE2BCBF6955817183995497CEA956AE515D2261898FA0510""" + """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64""" + """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7""" + """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B""" + """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C""" + """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31""" + """43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF""", base=16, ), 'generator': 2, }, # 4096-bit 16: { 'prime': int( """FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1""" + """29024E088A67CC74020BBEA63B139B22514A08798E3404DD""" + """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245""" + """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED""" + """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D""" + """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F""" + """83655D23DCA3AD961C62F356208552BB9ED529077096966D""" + """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B""" + """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9""" + """DE2BCBF6955817183995497CEA956AE515D2261898FA0510""" + """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64""" + """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7""" + """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B""" + """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C""" + """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31""" + """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7""" + """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA""" + """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6""" + """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED""" + """1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9""" + """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199""" + """FFFFFFFFFFFFFFFF""", base=16, ), 'generator': 2, }, # 6144-bit 17: { 'prime': int( """FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08""" + """8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B""" + """302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9""" + 
"""A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6""" + """49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8""" + """FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D""" + """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C""" + """180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718""" + """3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D""" + """04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D""" + """B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226""" + """1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C""" + """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC""" + """E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26""" + """99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB""" + """04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2""" + """233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127""" + """D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492""" + """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406""" + """AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918""" + """DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151""" + """2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03""" + """F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F""" + """BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA""" + """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B""" + """B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632""" + """387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E""" + """6DCC4024FFFFFFFFFFFFFFFF""", base=16, ), 'generator': 2, }, # 8192-bit 18: { 'prime': int( """FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1""" + """29024E088A67CC74020BBEA63B139B22514A08798E3404DD""" + """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245""" + """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED""" + """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D""" + """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F""" + 
"""83655D23DCA3AD961C62F356208552BB9ED529077096966D""" + """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B""" + """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9""" + """DE2BCBF6955817183995497CEA956AE515D2261898FA0510""" + """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64""" + """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7""" + """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B""" + """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C""" + """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31""" + """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7""" + """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA""" + """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6""" + """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED""" + """1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9""" + """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492""" + """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD""" + """F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831""" + """179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B""" + """DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF""" + """5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6""" + """D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3""" + """23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA""" + """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328""" + """06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C""" + """DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE""" + """12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4""" + """38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300""" + """741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568""" + """3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9""" + """22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B""" + """4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A""" + """062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36""" + """4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1""" + """B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92""" + """4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47""" + 
"""9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71""" + """60C980DD98EDD3DFFFFFFFFFFFFFFFFF""", base=16, ), 'generator': 2, }, } class __UpperCAmelCase: """simple docstring""" def __init__( self , snake_case__ = 14 ): '''simple docstring''' if group not in primes: raise ValueError("Unsupported Group" ) lowercase__ : List[str]= primes[group]['''prime'''] lowercase__ : Tuple= primes[group]['''generator'''] lowercase__ : Optional[int]= int(hexlify(urandom(32 ) ) , base=16 ) def UpperCAmelCase_ ( self ): '''simple docstring''' return hex(self.__private_key )[2:] def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[Any]= pow(self.generator , self.__private_key , self.prime ) return hex(UpperCamelCase__ )[2:] def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' # check if the other public key is valid based on NIST SP800-56 return ( 2 <= key <= self.prime - 2 and pow(UpperCamelCase__ , (self.prime - 1) // 2 , self.prime ) == 1 ) def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' lowercase__ : str= int(UpperCamelCase__ , base=16 ) if not self.is_valid_public_key(UpperCamelCase__ ): raise ValueError("Invalid public key" ) lowercase__ : List[Any]= pow(UpperCamelCase__ , self.__private_key , self.prime ) return shaaaa(str(UpperCamelCase__ ).encode() ).hexdigest() @staticmethod def UpperCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' # check if the other public key is valid based on NIST SP800-56 return ( 2 <= remote_public_key_str <= prime - 2 and pow(UpperCamelCase__ , (prime - 1) // 2 , UpperCamelCase__ ) == 1 ) @staticmethod def UpperCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ = 14 ): '''simple docstring''' lowercase__ : Optional[Any]= int(UpperCamelCase__ , base=16 ) lowercase__ : Any= int(UpperCamelCase__ , base=16 ) lowercase__ : int= primes[group]['''prime'''] if not DiffieHellman.is_valid_public_key_static(UpperCamelCase__ , UpperCamelCase__ ): raise ValueError("Invalid public key" 
) lowercase__ : Union[str, Any]= pow(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return shaaaa(str(UpperCamelCase__ ).encode() ).hexdigest() if __name__ == "__main__": import doctest doctest.testmod()
711
"""simple docstring""" from ....utils import logging a : List[str] = logging.get_logger(__name__) class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def __init__( self , snake_case__ , snake_case__=None , snake_case__=2048 ): '''simple docstring''' lowercase__ : Dict= config.__dict__ lowercase__ : str= modal_hidden_size if num_labels: lowercase__ : List[str]= num_labels
85
0
"""simple docstring""" import argparse import os from accelerate.utils import ComputeEnvironment from .cluster import get_cluster_input from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401 from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401 from .sagemaker import get_sagemaker_input a : int = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine""" def lowercase__() ->Dict: """simple docstring""" lowercase__ : Optional[Any]= _ask_options( "In which compute environment are you running?" , ["This machine", "AWS (Amazon SageMaker)"] , _convert_compute_environment , ) if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: lowercase__ : List[str]= get_sagemaker_input() else: lowercase__ : Dict= get_cluster_input() return config def lowercase__(A=None ) ->List[str]: """simple docstring""" if subparsers is not None: lowercase__ : Any= subparsers.add_parser("config" , description=lowerCamelCase_ ) else: lowercase__ : int= argparse.ArgumentParser("Accelerate config command" , description=lowerCamelCase_ ) parser.add_argument( "--config_file" , default=lowerCamelCase_ , help=( "The path to use to store the config file. Will default to a file named default_config.yaml in the cache " "location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have " "such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed " "with \'huggingface\'." 
) , ) if subparsers is not None: parser.set_defaults(func=lowerCamelCase_ ) return parser def lowercase__(A ) ->int: """simple docstring""" lowercase__ : Union[str, Any]= get_user_input() if args.config_file is not None: lowercase__ : str= args.config_file else: if not os.path.isdir(lowerCamelCase_ ): os.makedirs(lowerCamelCase_ ) lowercase__ : Any= default_yaml_config_file if config_file.endswith(".json" ): config.to_json_file(lowerCamelCase_ ) else: config.to_yaml_file(lowerCamelCase_ ) print(f'''accelerate configuration saved at {config_file}''' ) def lowercase__() ->Optional[Any]: """simple docstring""" lowercase__ : List[str]= config_command_parser() lowercase__ : Optional[int]= parser.parse_args() config_command(lowerCamelCase_ ) if __name__ == "__main__": main()
712
"""simple docstring""" import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def lowercase__(A ) ->int: """simple docstring""" lowercase__ : Optional[int]= [] embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''', f'''stage{idx}.patch_embed.proj.weight''', ) ) embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''', f'''stage{idx}.patch_embed.proj.bias''', ) ) embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''', f'''stage{idx}.patch_embed.norm.weight''', ) ) embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''', f'''stage{idx}.patch_embed.norm.bias''', ) ) return embed def lowercase__(A , A ) ->Any: """simple docstring""" lowercase__ : Any= [] attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''', ) ) attention_weights.append( ( 
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''', ) ) attention_weights.append( ( 
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''', ) ) attention_weights.append( ( 
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj.bias''', ) ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', f'''stage{idx}.blocks.{cnt}.norm1.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', f'''stage{idx}.blocks.{cnt}.norm1.bias''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', f'''stage{idx}.blocks.{cnt}.norm2.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', f'''stage{idx}.blocks.{cnt}.norm2.bias''') ) return attention_weights def lowercase__(A ) ->List[Any]: """simple 
docstring""" lowercase__ : Dict= [] token.append((f'''cvt.encoder.stages.{idx}.cls_token''', "stage2.cls_token") ) return token def lowercase__() ->Union[str, Any]: """simple docstring""" lowercase__ : Dict= [] head.append(("layernorm.weight", "norm.weight") ) head.append(("layernorm.bias", "norm.bias") ) head.append(("classifier.weight", "head.weight") ) head.append(("classifier.bias", "head.bias") ) return head def lowercase__(A , A , A , A ) ->Optional[int]: """simple docstring""" lowercase__ : List[str]= "imagenet-1k-id2label.json" lowercase__ : List[str]= 1_000 lowercase__ : Tuple= "huggingface/label-files" lowercase__ : int= num_labels lowercase__ : int= json.load(open(cached_download(hf_hub_url(A , A , repo_type="dataset" ) ) , "r" ) ) lowercase__ : str= {int(A ): v for k, v in idalabel.items()} lowercase__ : Optional[int]= idalabel lowercase__ : Union[str, Any]= {v: k for k, v in idalabel.items()} lowercase__ : Tuple= CvtConfig(num_labels=A , idalabel=A , labelaid=A ) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit("/" , 1 )[-1][4:6] == "13": lowercase__ : int= [1, 2, 10] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit("/" , 1 )[-1][4:6] == "21": lowercase__ : Union[str, Any]= [1, 4, 16] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: lowercase__ : Optional[Any]= [2, 2, 20] lowercase__ : Optional[Any]= [3, 12, 16] lowercase__ : List[str]= [192, 768, 1_024] lowercase__ : List[str]= CvtForImageClassification(A ) lowercase__ : Any= AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" ) lowercase__ : Dict= image_size lowercase__ : int= torch.load(A , map_location=torch.device("cpu" ) ) lowercase__ : Optional[Any]= OrderedDict() lowercase__ : Tuple= [] for idx in range(len(config.depth ) ): if config.cls_token[idx]: lowercase__ : Optional[int]= list_of_state_dict + cls_token(A ) lowercase__ : List[str]= list_of_state_dict + embeddings(A ) for cnt in range(config.depth[idx] ): lowercase__ : Dict= 
list_of_state_dict + attention(A , A ) lowercase__ : Optional[Any]= list_of_state_dict + final() for gg in list_of_state_dict: print(A ) for i in range(len(A ) ): lowercase__ : str= original_weights[list_of_state_dict[i][1]] model.load_state_dict(A ) model.save_pretrained(A ) image_processor.save_pretrained(A ) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": a : Optional[int] = argparse.ArgumentParser() parser.add_argument( """--cvt_model""", default="""cvt-w24""", type=str, help="""Name of the cvt model you'd like to convert.""", ) parser.add_argument( """--image_size""", default=384, type=int, help="""Input Image Size""", ) parser.add_argument( """--cvt_file_name""", default=r"""cvtmodels\CvT-w24-384x384-IN-22k.pth""", type=str, help="""Input Image Size""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) a : Optional[int] = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
85
0
"""simple docstring""" from collections import defaultdict class __UpperCAmelCase: """simple docstring""" def __init__( self , snake_case__ , snake_case__ ): '''simple docstring''' lowercase__ : Dict= total # total no of tasks (N) # DP table will have a dimension of (2^M)*N # initially all values are set to -1 lowercase__ : str= [ [-1 for i in range(total + 1 )] for j in range(2 ** len(snake_case__ ) ) ] lowercase__ : Any= defaultdict(snake_case__ ) # stores the list of persons for each task # final_mask is used to check if all persons are included by setting all bits # to 1 lowercase__ : Dict= (1 << len(snake_case__ )) - 1 def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ): '''simple docstring''' # if mask == self.finalmask all persons are distributed tasks, return 1 if mask == self.final_mask: return 1 # if not everyone gets the task and no more tasks are available, return 0 if task_no > self.total_tasks: return 0 # if case already considered if self.dp[mask][task_no] != -1: return self.dp[mask][task_no] # Number of ways when we don't this task in the arrangement lowercase__ : Any= self.count_ways_until(snake_case__ , task_no + 1 ) # now assign the tasks one by one to all possible persons and recursively # assign for the remaining tasks. if task_no in self.task: for p in self.task[task_no]: # if p is already given a task if mask & (1 << p): continue # assign this task to p and change the mask value. And recursively # assign tasks with the new mask value. total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 ) # save the value. 
lowercase__ : Tuple= total_ways_util return self.dp[mask][task_no] def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' # Store the list of persons for each task for i in range(len(snake_case__ ) ): for j in task_performed[i]: self.task[j].append(snake_case__ ) # call the function to fill the DP table, final answer is stored in dp[0][1] return self.count_ways_until(0 , 1 ) if __name__ == "__main__": a : Optional[Any] = 5 # total no of tasks (the value of N) # the list of tasks that can be done by M persons. a : Optional[Any] = [[1, 3, 4], [1, 2, 5], [3, 4]] print( AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways( task_performed ) )
713
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import ScoreSdeVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = 42 __lowerCamelCase = 42 def __init__( self , snake_case__ , snake_case__ ): '''simple docstring''' super().__init__() self.register_modules(unet=snake_case__ , scheduler=snake_case__ ) @torch.no_grad() def __call__( self , snake_case__ = 1 , snake_case__ = 2000 , snake_case__ = None , snake_case__ = "pil" , snake_case__ = True , **snake_case__ , ): '''simple docstring''' lowercase__ : Optional[Any]= self.unet.config.sample_size lowercase__ : Dict= (batch_size, 3, img_size, img_size) lowercase__ : List[Any]= self.unet lowercase__ : Tuple= randn_tensor(snake_case__ , generator=snake_case__ ) * self.scheduler.init_noise_sigma lowercase__ : Tuple= sample.to(self.device ) self.scheduler.set_timesteps(snake_case__ ) self.scheduler.set_sigmas(snake_case__ ) for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): lowercase__ : Optional[Any]= self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device ) # correction step for _ in range(self.scheduler.config.correct_steps ): lowercase__ : List[Any]= self.unet(snake_case__ , snake_case__ ).sample lowercase__ : List[Any]= self.scheduler.step_correct(snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample # prediction step lowercase__ : List[str]= model(snake_case__ , snake_case__ ).sample lowercase__ : Tuple= self.scheduler.step_pred(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ ) lowercase__, lowercase__ : Tuple= output.prev_sample, output.prev_sample_mean lowercase__ : List[str]= sample_mean.clamp(0 , 1 ) lowercase__ : Union[str, Any]= sample.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowercase__ : 
str= self.numpy_to_pil(snake_case__ ) if not return_dict: return (sample,) return ImagePipelineOutput(images=snake_case__ )
85
0
"""Character Error Rate (CER) metric, implemented on top of jiwer."""
from typing import List

import jiwer
import jiwer.transforms as tr
from packaging import version

import datasets
from datasets.config import PY_VERSION

if PY_VERSION < version.parse("3.8"):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


SENTENCE_DELIMITER = ""

if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):

    class SentencesToListOfCharacters(tr.AbstractTransform):
        """Backport of jiwer >= 2.3 behaviour: flatten sentences into chars."""

        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                # Insert the delimiter between sentences, never after the last.
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )


_CITATION = """\
@inproceedings{inproceedings,
  author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
  year = {2004},
  month = {01},
  pages = {},
  title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""

_DESCRIPTION = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.

CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.

Character error rate can be computed as:

CER = (S + D + I) / N = (S + D + I) / (S + D + C)

where

S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).

CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
"""

_KWARGS_DESCRIPTION = """
Computes CER score of transcribed segments against references.
Args:
    references: list of references for each speech input.
    predictions: list of transcribtions to score.
    concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
    (float): the character error rate

Examples:

    >>> predictions = ["this is the prediction", "there is an other sample"]
    >>> references = ["this is the reference", "there is another one"]
    >>> cer = datasets.load_metric("cer")
    >>> cer_score = cer.compute(predictions=predictions, references=references)
    >>> print(cer_score)
    0.34146341463414637
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CER(datasets.Metric):
    """CER metric: character-level edit operations over reference length."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            # One global alignment over the whole corpus at once.
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        # Per-pair alignment accumulated into a corpus-level rate.
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
714
"""simple docstring""" def lowercase__(A ) ->list[int]: """simple docstring""" lowercase__ : List[str]= len(A ) for i in range(A ): for j in range(i + 1 , A ): if numbers[j] < numbers[i]: lowercase__, lowercase__ : List[str]= numbers[j], numbers[i] return numbers if __name__ == "__main__": a : Dict = input("""Enter numbers separated by a comma:\n""").strip() a : List[str] = [int(item) for item in user_input.split(""",""")] print(exchange_sort(unsorted))
85
0
"""simple docstring""" def lowercase__(A , A ) ->Union[str, Any]: lowercase__ : List[str]= int(UpperCamelCase__ ) # Initialize Result lowercase__ : List[Any]= [] # Traverse through all denomination for denomination in reversed(UpperCamelCase__ ): # Find denominations while int(UpperCamelCase__ ) >= int(UpperCamelCase__ ): total_value -= int(UpperCamelCase__ ) answer.append(UpperCamelCase__ ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": a : Dict = [] a : str = """0""" if ( input("""Do you want to enter your denominations ? (yY/n): """).strip().lower() == "y" ): a : Any = int(input("""Enter the number of denominations you want to add: """).strip()) for i in range(0, n): denominations.append(int(input(F"""Denomination {i}: """).strip())) a : Tuple = input("""Enter the change you want to make in Indian Currency: """).strip() else: # All denominations of Indian Currency if user does not enter a : Dict = [1, 2, 5, 10, 20, 50, 100, 500, 2000] a : Union[str, Any] = input("""Enter the change you want to make: """).strip() if int(value) == 0 or int(value) < 0: print("""The total value cannot be zero or negative.""") else: print(F"""Following is minimal change for {value}: """) a : Any = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=""" """)
715
"""simple docstring""" import math from collections.abc import Iterator from itertools import takewhile def lowercase__(A ) ->bool: """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(A ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def lowercase__() ->Iterator[int]: """simple docstring""" lowercase__ : Union[str, Any]= 2 while True: if is_prime(A ): yield num num += 1 def lowercase__(A = 2_000_000 ) ->int: """simple docstring""" return sum(takewhile(lambda A : x < n , prime_generator() ) ) if __name__ == "__main__": print(F"""{solution() = }""")
85
0
"""simple docstring""" import argparse from pathlib import Path import requests import torch from PIL import Image from transformers import ( RobertaTokenizer, TrOCRConfig, TrOCRForCausalLM, TrOCRProcessor, VisionEncoderDecoderModel, ViTConfig, ViTImageProcessor, ViTModel, ) from transformers.utils import logging logging.set_verbosity_info() a : Optional[int] = logging.get_logger(__name__) def lowercase__(A , A ) ->Optional[int]: """simple docstring""" lowercase__ : int= [] for i in range(encoder_config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f'''encoder.deit.blocks.{i}.norm1.weight''', f'''encoder.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''encoder.deit.blocks.{i}.norm1.bias''', f'''encoder.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (f'''encoder.deit.blocks.{i}.attn.proj.weight''', f'''encoder.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append( (f'''encoder.deit.blocks.{i}.attn.proj.bias''', f'''encoder.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append( (f'''encoder.deit.blocks.{i}.norm2.weight''', f'''encoder.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''encoder.deit.blocks.{i}.norm2.bias''', f'''encoder.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append( (f'''encoder.deit.blocks.{i}.mlp.fc1.weight''', f'''encoder.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append( (f'''encoder.deit.blocks.{i}.mlp.fc1.bias''', f'''encoder.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append( (f'''encoder.deit.blocks.{i}.mlp.fc2.weight''', f'''encoder.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''encoder.deit.blocks.{i}.mlp.fc2.bias''', f'''encoder.encoder.layer.{i}.output.dense.bias''') ) # cls token, position embeddings and patch embeddings of encoder rename_keys.extend( [ ("encoder.deit.cls_token", 
"encoder.embeddings.cls_token"), ("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"), ("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"), ("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"), ("encoder.deit.norm.weight", "encoder.layernorm.weight"), ("encoder.deit.norm.bias", "encoder.layernorm.bias"), ] ) return rename_keys def lowercase__(A , A ) ->Optional[int]: """simple docstring""" for i in range(encoder_config.num_hidden_layers ): # queries, keys and values (only weights, no biases) lowercase__ : Optional[int]= state_dict.pop(f'''encoder.deit.blocks.{i}.attn.qkv.weight''' ) lowercase__ : Optional[int]= in_proj_weight[ : encoder_config.hidden_size, : ] lowercase__ : int= in_proj_weight[ encoder_config.hidden_size : encoder_config.hidden_size * 2, : ] lowercase__ : str= in_proj_weight[ -encoder_config.hidden_size :, : ] def lowercase__(A , A , A ) ->Tuple: """simple docstring""" lowercase__ : Tuple= dct.pop(_UpperCAmelCase ) lowercase__ : int= val def lowercase__(A ) ->List[str]: """simple docstring""" if "handwritten" in checkpoint_url: lowercase__ : str= "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg" # industry # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" # # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg" elif "printed" in checkpoint_url or "stage1" in checkpoint_url: lowercase__ : Tuple= "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg" lowercase__ : Tuple= Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw ).convert("RGB" ) return im @torch.no_grad() def lowercase__(A , A ) ->int: """simple docstring""" lowercase__ : Optional[Any]= 
ViTConfig(image_size=384 , qkv_bias=_UpperCAmelCase ) lowercase__ : Union[str, Any]= TrOCRConfig() # size of the architecture if "base" in checkpoint_url: lowercase__ : List[Any]= 768 elif "large" in checkpoint_url: # use ViT-large encoder lowercase__ : Tuple= 1_024 lowercase__ : Optional[int]= 4_096 lowercase__ : int= 24 lowercase__ : List[str]= 16 lowercase__ : List[Any]= 1_024 else: raise ValueError("Should either find \'base\' or \'large\' in checkpoint URL" ) # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards if "large-printed" in checkpoint_url or "stage1" in checkpoint_url: lowercase__ : str= False lowercase__ : List[Any]= "relu" lowercase__ : List[str]= 1_024 lowercase__ : str= True lowercase__ : int= False lowercase__ : Any= False # load HuggingFace model lowercase__ : Dict= ViTModel(_UpperCAmelCase , add_pooling_layer=_UpperCAmelCase ) lowercase__ : str= TrOCRForCausalLM(_UpperCAmelCase ) lowercase__ : Any= VisionEncoderDecoderModel(encoder=_UpperCAmelCase , decoder=_UpperCAmelCase ) model.eval() # load state_dict of original model, rename some keys lowercase__ : Tuple= torch.hub.load_state_dict_from_url(_UpperCAmelCase , map_location="cpu" , check_hash=_UpperCAmelCase )["model"] lowercase__ : List[Any]= create_rename_keys(_UpperCAmelCase , _UpperCAmelCase ) for src, dest in rename_keys: rename_key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) read_in_q_k_v(_UpperCAmelCase , _UpperCAmelCase ) # remove parameters we don't need del state_dict["encoder.deit.head.weight"] del state_dict["encoder.deit.head.bias"] del state_dict["decoder.version"] # add prefix to decoder keys for key, val in state_dict.copy().items(): lowercase__ : Tuple= state_dict.pop(_UpperCAmelCase ) if key.startswith("decoder" ) and "output_projection" not in key: lowercase__ : Any= val else: lowercase__ : Optional[Any]= val # load state dict model.load_state_dict(_UpperCAmelCase ) # Check outputs on an image lowercase__ : str= 
ViTImageProcessor(size=encoder_config.image_size ) lowercase__ : int= RobertaTokenizer.from_pretrained("roberta-large" ) lowercase__ : Any= TrOCRProcessor(_UpperCAmelCase , _UpperCAmelCase ) lowercase__ : str= processor(images=prepare_img(_UpperCAmelCase ) , return_tensors="pt" ).pixel_values # verify logits lowercase__ : Any= torch.tensor([[model.config.decoder.decoder_start_token_id]] ) lowercase__ : Optional[Any]= model(pixel_values=_UpperCAmelCase , decoder_input_ids=_UpperCAmelCase ) lowercase__ : int= outputs.logits lowercase__ : Any= torch.Size([1, 1, 50_265] ) if "trocr-base-handwritten" in checkpoint_url: lowercase__ : Tuple= torch.tensor( [-1.4_502, -4.6_683, -0.5_347, -2.9_291, 9.1_435, -3.0_571, 8.9_764, 1.7_560, 8.7_358, -1.5_311] ) elif "trocr-large-handwritten" in checkpoint_url: lowercase__ : Tuple= torch.tensor( [-2.6_437, -1.3_129, -2.2_596, -5.3_455, 6.3_539, 1.7_604, 5.4_991, 1.4_702, 5.6_113, 2.0_170] ) elif "trocr-base-printed" in checkpoint_url: lowercase__ : List[Any]= torch.tensor( [-5.6_816, -5.8_388, 1.1_398, -6.9_034, 6.8_505, -2.4_393, 1.2_284, -1.0_232, -1.9_661, -3.9_210] ) elif "trocr-large-printed" in checkpoint_url: lowercase__ : Optional[int]= torch.tensor( [-6.0_162, -7.0_959, 4.4_155, -5.1_063, 7.0_468, -3.1_631, 2.6_466, -0.3_081, -0.8_106, -1.7_535] ) if "stage1" not in checkpoint_url: assert logits.shape == expected_shape, "Shape of logits not as expected" assert torch.allclose(logits[0, 0, :10] , _UpperCAmelCase , atol=1e-3 ), "First elements of logits not as expected" Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(_UpperCAmelCase ) print(f'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(_UpperCAmelCase ) if __name__ == "__main__": a : int = argparse.ArgumentParser() parser.add_argument( """--checkpoint_url""", 
default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""", type=str, help="""URL to the original PyTorch checkpoint (.pth file).""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) a : Optional[int] = parser.parse_args() convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
716
"""simple docstring""" def lowercase__(A ) ->bool: """simple docstring""" lowercase__ : Tuple= (1 + 24 * n) ** 0.5 return ((1 + root) / 6) % 1 == 0 def lowercase__(A = 5_000 ) ->int: """simple docstring""" lowercase__ : str= [(i * (3 * i - 1)) // 2 for i in range(1 , A )] for i, pentagonal_i in enumerate(A ): for j in range(A , len(A ) ): lowercase__ : List[Any]= pentagonal_nums[j] lowercase__ : int= pentagonal_i + pentagonal_j lowercase__ : Optional[int]= pentagonal_j - pentagonal_i if is_pentagonal(A ) and is_pentagonal(A ): return b return -1 if __name__ == "__main__": print(F"""{solution() = }""")
85
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) a : Union[str, Any] = { """configuration_resnet""": ["""RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ResNetConfig""", """ResNetOnnxConfig"""] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : List[Any] = [ """RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """ResNetForImageClassification""", """ResNetModel""", """ResNetPreTrainedModel""", """ResNetBackbone""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : str = [ """TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFResNetForImageClassification""", """TFResNetModel""", """TFResNetPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Optional[int] = [ """FlaxResNetForImageClassification""", """FlaxResNetModel""", """FlaxResNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_resnet import ( RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, ResNetBackbone, ResNetForImageClassification, ResNetModel, ResNetPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_resnet import ( TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFResNetForImageClassification, TFResNetModel, TFResNetPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_resnet import FlaxResNetForImageClassification, 
FlaxResNetModel, FlaxResNetPreTrainedModel else: import sys a : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
717
"""Pix2Struct model configuration (text, vision, and combined configs)."""
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}


class Pix2StructTextConfig(PretrainedConfig):
    """Configuration for the Pix2Struct text (decoder) model.

    Stores the hyper-parameters of the T5-style decoder: vocabulary size,
    hidden/feed-forward sizes, relative-attention settings, dropout, etc.
    """

    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load the text sub-config, unwrapping it from a full Pix2StructConfig if needed."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructVisionConfig(PretrainedConfig):
    """Configuration for the Pix2Struct vision (image encoder) model."""

    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load the vision sub-config, unwrapping it from a full Pix2StructConfig if needed."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructConfig(PretrainedConfig):
    """Combined configuration holding a text and a vision sub-config.

    Token ids (pad/eos/decoder_start) are mirrored from the text config, and
    ``initializer_range`` is propagated down into both sub-configs.
    """

    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        # mirror the text model's special token ids at the top level
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        # keep the sub-configs' initializer_range consistent with the composite config
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Build a composite config from already-instantiated sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
85
0
"""Directed graph stored as an adjacency list, with a recursive depth-first search.

NOTE: ``dfs`` indexes its ``visited`` list by vertex label, so it assumes
vertex labels are exactly 0..n-1 (as in the demo below).
"""


class Graph:
    def __init__(self):
        # vertex -> list of vertices reachable by one outgoing edge
        self.vertex = {}

    def print_graph(self):
        """Print the raw adjacency dict, then one 'v -> a -> b' line per vertex."""
        print(self.vertex)
        for source in self.vertex:
            print(source, " -> ", " -> ".join(str(dest) for dest in self.vertex[source]))

    def add_edge(self, from_vertex, to_vertex):
        """Add a directed edge from_vertex -> to_vertex, creating the vertex entry if needed."""
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self):
        """Run DFS from every unvisited vertex, printing vertices in visit order."""
        visited = [False] * len(self.vertex)
        for start in range(len(self.vertex)):
            if not visited[start]:
                self.dfs_recursive(start, visited)

    def dfs_recursive(self, start_vertex, visited):
        """Visit start_vertex, then recurse into its unvisited neighbors."""
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # follow only the edges out of start_vertex (not every vertex in the graph)
        for neighbor in self.vertex.get(start_vertex, []):
            if not visited[neighbor]:
                self.dfs_recursive(neighbor, visited)


if __name__ == "__main__":
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)

    g.print_graph()
    print("DFS:")
    g.dfs()

    # OUTPUT:
    # 0 -> 1 -> 2
    # 1 -> 2
    # 2 -> 0 -> 3
    # 3 -> 3
    # DFS:
    # 0 1 2 3
718
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM @require_tf @require_sentencepiece @require_tokenizers class __UpperCAmelCase( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : str= TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" ) lowercase__ : str= AutoTokenizer.from_pretrained("google/mt5-small" ) lowercase__ : Tuple= tokenizer("Hello there" , return_tensors="tf" ).input_ids lowercase__ : Optional[Any]= tokenizer("Hi I am" , return_tensors="tf" ).input_ids lowercase__ : Optional[Any]= model(snake_case__ , labels=snake_case__ ).loss lowercase__ : int= -tf.math.reduce_mean(snake_case__ ).numpy() lowercase__ : int= -21.22_81_68 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
85
0
def average_absolute_deviation(nums: list[int]) -> float:
    """Return the mean absolute deviation of *nums* about its arithmetic mean.

    >>> average_absolute_deviation([0])
    0.0
    >>> average_absolute_deviation([4, 1, 3, 2])
    1.0

    Raises:
        ValueError: if *nums* is empty (the mean would be undefined).
    """
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
719
"""Processor for BridgeTower: wraps an image processor and a Roberta tokenizer."""
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class BridgeTowerProcessor(ProcessorMixin):
    """Combines BridgeTowerImageProcessor and a (fast) RobertaTokenizer into one callable."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Tokenize *text* and preprocess *images*, returning one merged BatchEncoding.

        The text-related arguments are forwarded verbatim to the tokenizer; the
        image processor output (pixel_values + pixel_mask) is merged into the
        tokenizer encoding.
        """
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Union of tokenizer and image-processor input names, order-preserving and deduplicated."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
85
0
"""Lazy-import module init for the Speech2Text model family.

Optional backends (sentencepiece, torchaudio/speech, tf, torch) contribute
their symbols to ``_import_structure`` only when available.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
720
"""Tests for RagRetriever against canonical, custom and legacy FAISS indexes."""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch

import numpy as np
from datasets import Dataset

from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch


if is_faiss_available():
    import faiss


@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        """Create tiny on-disk DPR and BART tokenizers in a temp dir."""
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want",
            "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l",
            "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer",
            "\u0120wider", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_dummy_dataset(self):
        """Two-row dataset with a flat inner-product FAISS index over 'embeddings'."""
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset

    def get_dummy_canonical_hf_index_retriever(self):
        """Retriever over the canonical HF index, with load_dataset patched to the dummy dataset."""
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever

    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        """Retriever over a CustomHFIndex, either loaded from disk or passed in memory."""
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever

    def get_dummy_legacy_index_retriever(self):
        """Retriever over the legacy index format (saved faiss index + pickled meta/passages)."""
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever

    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids,
            hidden_states,
            prefix=retriever.config.generator.prefix,
            n_docs=n_docs,
            return_tensors="pt",
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_custom_hf_index_end_to_end_retrieval(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(len(out), 6)
        # check for doc token related keys in dictionary.
        self.assertEqual(all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True)
85
0
"""simple docstring""" import inspect import tempfile import unittest from huggingface_hub import hf_hub_download from transformers import is_torch_available from transformers.testing_utils import is_flaky, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin a : Optional[Any] = 1e-4 if is_torch_available(): import torch from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder @require_torch class __UpperCAmelCase: """simple docstring""" def __init__( self , snake_case__ , snake_case__=16 , snake_case__=13 , snake_case__=7 , snake_case__=14 , snake_case__=10 , snake_case__=19 , snake_case__=5 , snake_case__=4 , snake_case__=True , snake_case__=16 , snake_case__=2 , snake_case__=4 , snake_case__=4 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=[1, 2, 3, 4, 5] , snake_case__=25 , snake_case__=5 , ): '''simple docstring''' lowercase__ : Dict= d_model lowercase__ : Optional[Any]= parent lowercase__ : Any= batch_size lowercase__ : Union[str, Any]= prediction_length lowercase__ : List[Any]= context_length lowercase__ : Dict= cardinality lowercase__ : str= num_time_features lowercase__ : int= lags_sequence lowercase__ : Any= embedding_dimension lowercase__ : Dict= is_training lowercase__ : int= hidden_size lowercase__ : int= num_hidden_layers lowercase__ : Tuple= num_attention_heads lowercase__ : List[str]= intermediate_size lowercase__ : List[Any]= hidden_act lowercase__ : Optional[int]= hidden_dropout_prob lowercase__ : str= attention_probs_dropout_prob lowercase__ : List[Any]= context_length lowercase__ : int= prediction_length + label_length lowercase__ : Any= label_length lowercase__ : List[str]= moving_average lowercase__ : int= 
autocorrelation_factor def UpperCAmelCase_ ( self ): '''simple docstring''' return AutoformerConfig( d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , ) def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' lowercase__ : Union[str, Any]= config.context_length + max(config.lags_sequence ) lowercase__ : Optional[Any]= ids_tensor([self.batch_size, 1] , config.cardinality[0] ) lowercase__ : Any= floats_tensor([self.batch_size, _past_length, config.num_time_features] ) lowercase__ : List[Any]= floats_tensor([self.batch_size, _past_length] ) lowercase__ : str= floats_tensor([self.batch_size, _past_length] ) > 0.5 # decoder inputs lowercase__ : List[Any]= floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] ) lowercase__ : Union[str, Any]= floats_tensor([self.batch_size, config.prediction_length] ) lowercase__ : Optional[int]= { "past_values": past_values, "static_categorical_features": static_categorical_features, "past_time_features": past_time_features, "past_observed_mask": past_observed_mask, "future_time_features": future_time_features, "future_values": future_values, } return inputs_dict def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : str= self.get_config() lowercase__ : str= self.prepare_autoformer_inputs_dict(_a ) return config, inputs_dict 
def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__, lowercase__ : Optional[Any]= self.prepare_config_and_inputs() return config, inputs_dict def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ): '''simple docstring''' lowercase__ : Optional[int]= AutoformerModel(config=_a ).to(_a ).eval() lowercase__ : Tuple= model(**_a ) lowercase__ : Any= outputs.encoder_last_hidden_state lowercase__ : int= outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: lowercase__ : Optional[Any]= model.get_encoder() encoder.save_pretrained(_a ) lowercase__ : Optional[int]= AutoformerEncoder.from_pretrained(_a ).to(_a ) lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ : Optional[int]= model.create_network_inputs(**_a ) lowercase__, lowercase__ : Union[str, Any]= model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] ) lowercase__ : Union[str, Any]= torch.cat( (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , ) lowercase__ : Optional[Any]= encoder(inputs_embeds=_a )[0] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 ) lowercase__ : int= ( torch.mean(transformer_inputs[:, : config.context_length, ...] 
, dim=1 ) .unsqueeze(1 ) .repeat(1 , config.prediction_length , 1 ) ) lowercase__ : str= torch.zeros( [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , ) lowercase__ : Tuple= torch.cat( ( torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) lowercase__ : Any= torch.cat( ( torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) with tempfile.TemporaryDirectory() as tmpdirname: lowercase__ : Any= model.get_decoder() decoder.save_pretrained(_a ) lowercase__ : Any= AutoformerDecoder.from_pretrained(_a ).to(_a ) lowercase__ : List[Any]= decoder( trend=_a , inputs_embeds=_a , encoder_hidden_states=_a , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 ) @require_torch class __UpperCAmelCase( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): """simple docstring""" __lowerCamelCase = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else () __lowerCamelCase = (AutoformerForPrediction,) if is_torch_available() else () __lowerCamelCase = {'feature-extraction': AutoformerModel} if is_torch_available() else {} __lowerCamelCase = False __lowerCamelCase = False __lowerCamelCase = False __lowerCamelCase = False __lowerCamelCase = False __lowerCamelCase = False def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : str= AutoformerModelTester(self ) lowercase__ : Union[str, Any]= ConfigTester(self , config_class=_a , has_text_modality=_a ) def UpperCAmelCase_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__, lowercase__ : Optional[Any]= self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: lowercase__ : Dict= 
model_class(_a ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_a ) lowercase__, lowercase__ : Dict= model_class.from_pretrained(_a , output_loading_info=_a ) self.assertEqual(info["missing_keys"] , [] ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[int]= self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*_a ) @unittest.skip(reason="Model has no tokens embeddings" ) def UpperCAmelCase_ ( self ): '''simple docstring''' pass def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[Any]= inspect.signature(getattr(_a , "forward" ) ) # The main input is the name of the argument after `self` lowercase__ : Optional[int]= list(model_signature.parameters.keys() )[1] self.assertEqual(AutoformerModel.main_input_name , _a ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__, lowercase__ : int= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : List[Any]= model_class(_a ) lowercase__ : Any= inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ : str= [*signature.parameters.keys()] lowercase__ : Union[str, Any]= [ "past_values", "past_time_features", "past_observed_mask", "static_categorical_features", "static_real_features", "future_values", "future_time_features", ] if model.__class__.__name__ in ["AutoformerForPrediction"]: expected_arg_names.append("future_observed_mask" ) expected_arg_names.extend( [ "decoder_attention_mask", "head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs", "past_key_values", "output_hidden_states", "output_attentions", "use_cache", "return_dict", ] ) self.assertListEqual(arg_names[: len(_a )] , _a ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__, lowercase__ : Dict= self.model_tester.prepare_config_and_inputs_for_common() 
lowercase__ : Union[str, Any]= True lowercase__ : int= getattr(self.model_tester , "seq_length" , _a ) lowercase__ : Optional[int]= getattr(self.model_tester , "decoder_seq_length" , _a ) lowercase__ : Tuple= getattr(self.model_tester , "encoder_seq_length" , _a ) lowercase__ : Any= getattr(self.model_tester , "d_model" , _a ) lowercase__ : Optional[Any]= getattr(self.model_tester , "num_attention_heads" , _a ) lowercase__ : List[Any]= d_model // num_attention_heads for model_class in self.all_model_classes: lowercase__ : List[Any]= True lowercase__ : Union[str, Any]= False lowercase__ : Optional[int]= True lowercase__ : Union[str, Any]= model_class(_a ) model.to(_a ) model.eval() with torch.no_grad(): lowercase__ : Any= model(**self._prepare_for_class(_a , _a ) ) lowercase__ : Tuple= outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowercase__ : List[Any]= True lowercase__ : List[str]= model_class(_a ) model.to(_a ) model.eval() with torch.no_grad(): lowercase__ : Tuple= model(**self._prepare_for_class(_a , _a ) ) lowercase__ : List[Any]= outputs.encoder_attentions self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) lowercase__ : str= len(_a ) lowercase__ : Union[str, Any]= 7 if "last_hidden_state" in outputs: correct_outlen += 1 if "trend" in outputs: correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned if "loss" in outputs: correct_outlen += 1 if "params" in outputs: correct_outlen += 1 self.assertEqual(_a , _a ) # decoder attentions lowercase__ : Optional[Any]= outputs.decoder_attentions self.assertIsInstance(_a , (list, tuple) ) self.assertEqual(len(_a ) , 
self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # cross attentions lowercase__ : Tuple= outputs.cross_attentions self.assertIsInstance(_a , (list, tuple) ) self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # Check attention is always last and order is fine lowercase__ : Union[str, Any]= True lowercase__ : Dict= True lowercase__ : Any= model_class(_a ) model.to(_a ) model.eval() with torch.no_grad(): lowercase__ : int= model(**self._prepare_for_class(_a , _a ) ) self.assertEqual(out_len + 2 , len(_a ) ) lowercase__ : Optional[Any]= outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) @is_flaky() def UpperCAmelCase_ ( self ): '''simple docstring''' super().test_retain_grad_hidden_states_attentions() def lowercase__(A="train-batch.pt" ) ->List[str]: """simple docstring""" lowercase__ : List[str]= hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=lowerCAmelCase__ , repo_type="dataset" ) lowercase__ : Union[str, Any]= torch.load(lowerCAmelCase__ , map_location=lowerCAmelCase__ ) return batch @require_torch @slow class __UpperCAmelCase( unittest.TestCase ): """simple docstring""" def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Union[str, Any]= AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(_a ) lowercase__ : Optional[Any]= prepare_batch() with torch.no_grad(): lowercase__ : str= model( past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , 
past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0] lowercase__ : List[str]= torch.Size( (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) ) self.assertEqual(output.shape , _a ) lowercase__ : Dict= torch.tensor( [[0.35_93, -1.33_98, 0.63_30], [0.22_79, 1.53_96, -0.17_92], [0.04_50, 1.32_25, -0.23_35]] , device=_a ) self.assertTrue(torch.allclose(output[0, :3, :3] , _a , atol=_a ) ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[Any]= AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(_a ) lowercase__ : List[str]= prepare_batch("val-batch.pt" ) with torch.no_grad(): lowercase__ : List[str]= model( past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state lowercase__ : Tuple= torch.Size((64, model.config.context_length, model.config.d_model) ) self.assertEqual(output.shape , _a ) lowercase__ : Union[str, Any]= torch.tensor( [[-0.07_34, -0.90_36, 0.83_58], [4.71_86, 2.41_13, 1.95_81], [1.79_53, 2.35_58, 1.29_70]] , device=_a ) self.assertTrue(torch.allclose(output[0, :3, :3] , _a , atol=_a ) ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Any= AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(_a ) lowercase__ : Optional[int]= prepare_batch("val-batch.pt" ) with torch.no_grad(): lowercase__ : Optional[Any]= model.generate( static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , ) lowercase__ 
: Optional[Any]= torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) ) self.assertEqual(outputs.sequences.shape , _a ) lowercase__ : Union[str, Any]= torch.tensor([31_30.67_63, 40_56.52_93, 70_53.07_86] , device=_a ) lowercase__ : Union[str, Any]= outputs.sequences.mean(dim=1 ) self.assertTrue(torch.allclose(mean_prediction[0, -3:] , _a , rtol=1e-1 ) )
721
"""Processor pairing an auto image processor with an auto tokenizer."""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class __UpperCAmelCase(ProcessorMixin):
    """Single entry point for text and/or image preprocessing.

    NOTE(review): the original dump inherited from an undefined name,
    repeated one parameter name in ``__init__`` (a SyntaxError), and let
    three methods shadow each other under one name; this restores the
    conventional ProcessorMixin contract the bodies clearly implement.
    """

    # Sub-processors managed by ProcessorMixin (saved/loaded together).
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        # Processor used by default until a modality is chosen.
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize ``text`` and/or preprocess ``images``.

        Returns a BatchEncoding with ``input_ids``/``attention_mask`` from the
        tokenizer and/or ``pixel_values`` from the image processor.

        Raises:
            ValueError: if both ``text`` and ``images`` are None.
        """
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            # Merge vision features into the text encoding (the original dump
            # assigned them to a discarded local instead).
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            # Images only: wrap the image features themselves.
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Keys this processor can produce in a forward batch.
        return ["input_ids", "attention_mask", "pixel_values"]
85
0
"""Mask2Former model configuration (restored from a name-mangled dump)."""
import copy
from typing import Dict, List, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


# Pretrained checkpoint name -> config URL.
a = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

# The original dump rebound the logger to `a` while the class body reads
# `logger`; bind it under the name actually used.
logger = logging.get_logger(__name__)


class __UpperCAmelCase(PretrainedConfig):
    """Configuration holding the backbone config plus the transformer-decoder
    and loss hyper-parameters for a Mask2Former model."""

    model_type = "mask2former"
    # Backbone families this config validates against.
    backbones_supported = ["swin"]
    # PretrainedConfig alias: `hidden_size` reads/writes `hidden_dim`.
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        # Accept a plain dict and rehydrate it through the registered config class.
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # Warn (don't fail) on unsupported backbones.
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        # The dump assigned `decoder_layers` twice; the second binding is the
        # conventional `num_hidden_layers` alias used elsewhere in the library.
        self.num_hidden_layers = decoder_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Alternate constructor from an already-built backbone config."""
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        """Serialize to a plain dict, inlining the backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
700
"""Pure-Python Base64 encoding and decoding (RFC 4648 alphabet)."""

# The code below reads `B64_CHARSET`; the dump bound the alphabet to `a`.
# Define the name actually used and keep `a` as a compatibility alias.
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
a = B64_CHARSET


def base64_encode(data: bytes) -> bytes:
    """Encode `data` as Base64, returning bytes (with `=` padding).

    Raises:
        TypeError: if `data` is not a bytes-like object.
    """
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    # 8 bits per input byte, as one long bit-string.
    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # One '=' per dropped 2-bit remainder; computed BEFORE zero-filling.
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)
        # Zero-fill so the stream splits evenly into 6-bit groups.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Map each 6-bit group to its Base64 character.
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data) -> bytes:
    """Decode a Base64 string or ASCII bytes back to the original bytes.

    Raises:
        TypeError: if `encoded_data` is neither bytes nor str.
        ValueError: if byte input is not ASCII-decodable.
        AssertionError: on invalid characters or incorrect padding.
    """
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # Normalize bytes input to str, insisting on ASCII-compatible content.
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Validate the alphabet (excluding any trailing '=' padding).
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Strip padding chars and the 2 filler bits each one represents.
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    # Regroup into 8-bit values.
    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(decoded_data)


# The dump bound both functions to one name; its final binding was the
# decoder, preserved here for backward compatibility.
lowercase__ = base64_decode


if __name__ == "__main__":
    import doctest

    doctest.testmod()
85
0
"""Maximum sum of non-adjacent elements (the classic house-robber problem)."""
from __future__ import annotations


def lowercase__(A) -> int:
    """Return the largest sum obtainable from `A` choosing no two adjacent items.

    The empty selection (sum 0) is always allowed, so the result is never
    negative; an empty input returns 0. Runs in O(n) time, O(1) space.
    """
    if not A:
        return 0
    max_including = A[0]  # best sum that uses the current element
    max_excluding = 0  # best sum that skips the current element
    for num in A[1:]:
        # Taking `num` forces the previous element to be excluded.
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
701
"""Longest non-decreasing subsequence via pivot-and-recurse (exponential worst case)."""
from __future__ import annotations


def lowercase__(A) -> list[int]:  # This function is recursive
    """Return a longest non-decreasing subsequence of `A`.

    Strategy: either the answer keeps the first element (pivot) and recurses on
    the remaining elements >= pivot, or it restarts at the first element smaller
    than the pivot; the longer of the two candidates wins.
    """
    array_length = len(A)
    # A list of 0 or 1 elements is its own longest subsequence
    # (it's the stop condition of the recursion).
    if array_length <= 1:
        return A
    # Else
    pivot = A[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if A[i] < pivot:
            # Candidate that drops the pivot and restarts at A[i].
            is_found = True
            temp_array = [element for element in A[i:] if element >= A[i]]
            # NOTE: the mangled original recursed via a name that no longer
            # existed; recurse through this function itself.
            temp_array = lowercase__(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    # Candidate that keeps the pivot in front.
    temp_array = [element for element in A[1:] if element >= pivot]
    temp_array = [pivot, *lowercase__(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq


if __name__ == "__main__":
    import doctest

    doctest.testmod()
85
0
"""Min-max normalization and z-score standardization of numeric data."""
from statistics import mean, stdev


def normalization(data: list, ndigits: int = 3) -> list:
    """Rescale `data` linearly into [0, 1], rounding each value to `ndigits`.

    Assumes at least two distinct values (otherwise max == min divides by zero).
    """
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    """Shift `data` to zero mean and unit *sample* standard deviation,
    rounding each value to `ndigits`. Requires at least two data points."""
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / sigma, ndigits) for x in data]


# The mangled original bound both routines to a single name whose final
# binding was the standardization function; preserved for compatibility.
lowercase__ = standardization
702
"""CLI: assemble an UnCLIP image-variation pipeline from a txt2img UnCLIP checkpoint.

The mangled original rebound every variable to `a` while still reading
`parser`, `args`, `txtaimg`, and `imgaimg`; this restores one consistent
set of names so the script actually runs.
"""
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline

if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    # Source txt2img pipeline; its sub-modules are reused verbatim below.
    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    # Vision stack the image-variation pipeline needs in addition.
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
85
0
"""Dataset reader building a `Dataset` from plain-text files via the Text builder."""
from typing import Optional

from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class __UpperCAmelCase(AbstractDatasetReader):
    """Reads text file(s) through the `Text` packaged builder.

    NOTE(review): the original dump inherited from an undefined name and
    repeated one parameter name (a SyntaxError); the imported
    `AbstractDatasetReader` base and conventional parameter set are restored.
    """

    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # Normalize to a {split: paths} mapping for the builder.
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def UpperCAmelCase_(self):
        """Materialize and return the dataset (streaming or map-style)."""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            # Defaults: no download config/mode/verification override, no base path.
            # (The mangled original passed an undefined name here — NameError.)
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
703
"""simple docstring""" import argparse import os from . import ( ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BART_PRETRAINED_MODEL_ARCHIVE_LIST, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, BartConfig, BertConfig, CamembertConfig, CTRLConfig, DistilBertConfig, DPRConfig, ElectraConfig, FlaubertConfig, GPTaConfig, LayoutLMConfig, LxmertConfig, OpenAIGPTConfig, RobertaConfig, TaConfig, TFAlbertForPreTraining, TFBartForConditionalGeneration, TFBartForSequenceClassification, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFCamembertForMaskedLM, TFCTRLLMHeadModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, TFElectraForPreTraining, TFFlaubertWithLMHeadModel, TFGPTaLMHeadModel, TFLayoutLMForMaskedLM, TFLxmertForPreTraining, TFLxmertVisualFeatureEncoder, TFOpenAIGPTLMHeadModel, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForSequenceClassification, TFTaForConditionalGeneration, TFTransfoXLLMHeadModel, TFWavaVecaModel, TFXLMRobertaForMaskedLM, TFXLMWithLMHeadModel, TFXLNetLMHeadModel, TransfoXLConfig, WavaVecaConfig, WavaVecaModel, XLMConfig, XLMRobertaConfig, XLNetConfig, is_torch_available, 
load_pytorch_checkpoint_in_tfa_model, ) from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging if is_torch_available(): import numpy as np import torch from . import ( AlbertForPreTraining, BartForConditionalGeneration, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, CamembertForMaskedLM, CTRLLMHeadModel, DistilBertForMaskedLM, DistilBertForQuestionAnswering, DPRContextEncoder, DPRQuestionEncoder, DPRReader, ElectraForPreTraining, FlaubertWithLMHeadModel, GPTaLMHeadModel, LayoutLMForMaskedLM, LxmertForPreTraining, LxmertVisualFeatureEncoder, OpenAIGPTLMHeadModel, RobertaForMaskedLM, RobertaForSequenceClassification, TaForConditionalGeneration, TransfoXLLMHeadModel, XLMRobertaForMaskedLM, XLMWithLMHeadModel, XLNetLMHeadModel, ) logging.set_verbosity_info() a : Optional[Any] = { """bart""": ( BartConfig, TFBartForConditionalGeneration, TFBartForSequenceClassification, BartForConditionalGeneration, BART_PRETRAINED_MODEL_ARCHIVE_LIST, ), """bert""": ( BertConfig, TFBertForPreTraining, BertForPreTraining, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-large-uncased-whole-word-masking-finetuned-squad""": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-large-cased-whole-word-masking-finetuned-squad""": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-base-cased-finetuned-mrpc""": ( BertConfig, TFBertForSequenceClassification, BertForSequenceClassification, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """dpr""": ( DPRConfig, TFDPRQuestionEncoder, TFDPRContextEncoder, TFDPRReader, DPRQuestionEncoder, DPRContextEncoder, DPRReader, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ), """gpt2""": ( GPTaConfig, TFGPTaLMHeadModel, GPTaLMHeadModel, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlnet""": ( XLNetConfig, 
TFXLNetLMHeadModel, XLNetLMHeadModel, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlm""": ( XLMConfig, TFXLMWithLMHeadModel, XLMWithLMHeadModel, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlm-roberta""": ( XLMRobertaConfig, TFXLMRobertaForMaskedLM, XLMRobertaForMaskedLM, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """transfo-xl""": ( TransfoXLConfig, TFTransfoXLLMHeadModel, TransfoXLLMHeadModel, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """openai-gpt""": ( OpenAIGPTConfig, TFOpenAIGPTLMHeadModel, OpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """roberta""": ( RobertaConfig, TFRobertaForCausalLM, TFRobertaForMaskedLM, RobertaForMaskedLM, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """layoutlm""": ( LayoutLMConfig, TFLayoutLMForMaskedLM, LayoutLMForMaskedLM, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, ), """roberta-large-mnli""": ( RobertaConfig, TFRobertaForSequenceClassification, RobertaForSequenceClassification, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """camembert""": ( CamembertConfig, TFCamembertForMaskedLM, CamembertForMaskedLM, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """flaubert""": ( FlaubertConfig, TFFlaubertWithLMHeadModel, FlaubertWithLMHeadModel, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """distilbert""": ( DistilBertConfig, TFDistilBertForMaskedLM, DistilBertForMaskedLM, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """distilbert-base-distilled-squad""": ( DistilBertConfig, TFDistilBertForQuestionAnswering, DistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """lxmert""": ( LxmertConfig, TFLxmertForPreTraining, LxmertForPreTraining, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """lxmert-visual-feature-encoder""": ( LxmertConfig, TFLxmertVisualFeatureEncoder, LxmertVisualFeatureEncoder, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """ctrl""": ( CTRLConfig, TFCTRLLMHeadModel, CTRLLMHeadModel, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """albert""": ( AlbertConfig, TFAlbertForPreTraining, AlbertForPreTraining, 
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """t5""": ( TaConfig, TFTaForConditionalGeneration, TaForConditionalGeneration, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """electra""": ( ElectraConfig, TFElectraForPreTraining, ElectraForPreTraining, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """wav2vec2""": ( WavaVecaConfig, TFWavaVecaModel, WavaVecaModel, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), } def lowercase__(A , A , A , A , A=False , A=True ) ->Union[str, Any]: """simple docstring""" if model_type not in MODEL_CLASSES: raise ValueError(f'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' ) lowercase__, lowercase__, lowercase__, lowercase__ : List[Any]= MODEL_CLASSES[model_type] # Initialise TF model if config_file in aws_config_map: lowercase__ : List[str]= cached_file(A , A , force_download=not use_cached_models ) lowercase__ : List[Any]= config_class.from_json_file(A ) lowercase__ : Any= True lowercase__ : List[str]= True print(f'''Building TensorFlow model from configuration: {config}''' ) lowercase__ : Optional[int]= model_class(A ) # Load weights from tf checkpoint if pytorch_checkpoint_path in aws_config_map.keys(): lowercase__ : List[str]= cached_file( A , A , force_download=not use_cached_models ) # Load PyTorch checkpoint in tf2 model: lowercase__ : Union[str, Any]= load_pytorch_checkpoint_in_tfa_model(A , A ) if compare_with_pt_model: lowercase__ : Any= tf_model(tf_model.dummy_inputs , training=A ) # build the network lowercase__ : Optional[Any]= torch.load(A , map_location="cpu" ) lowercase__ : Union[str, Any]= pt_model_class.from_pretrained( pretrained_model_name_or_path=A , config=A , state_dict=A ) with torch.no_grad(): lowercase__ : str= pt_model(**pt_model.dummy_inputs ) lowercase__ : Tuple= pto[0].numpy() lowercase__ : List[Any]= tfo[0].numpy() lowercase__ : Any= np.amax(np.abs(np_pt - np_tf ) ) print(f'''Max absolute difference between models outputs {diff}''' ) assert diff <= 2e-2, f'''Error, model absolute difference is 
>2e-2: {diff}''' # Save pytorch-model print(f'''Save TensorFlow model to {tf_dump_path}''' ) tf_model.save_weights(A , save_format="h5" ) def lowercase__(A , A , A=None , A=None , A=False , A=False , A=False , A=False , ) ->List[Any]: """simple docstring""" if args_model_type is None: lowercase__ : Tuple= list(MODEL_CLASSES.keys() ) else: lowercase__ : Optional[int]= [args_model_type] for j, model_type in enumerate(A , start=1 ): print("=" * 100 ) print(f''' Converting model type {j}/{len(A )}: {model_type}''' ) print("=" * 100 ) if model_type not in MODEL_CLASSES: raise ValueError(f'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' ) lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ : Optional[int]= MODEL_CLASSES[model_type] if model_shortcut_names_or_path is None: lowercase__ : int= list(aws_model_maps.keys() ) if config_shortcut_names_or_path is None: lowercase__ : Any= model_shortcut_names_or_path for i, (model_shortcut_name, config_shortcut_name) in enumerate( zip(A , A ) , start=1 ): print("-" * 100 ) if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name: if not only_convert_finetuned_models: print(f''' Skipping finetuned checkpoint {model_shortcut_name}''' ) continue lowercase__ : Any= model_shortcut_name elif only_convert_finetuned_models: print(f''' Skipping not finetuned checkpoint {model_shortcut_name}''' ) continue print( f''' Converting checkpoint {i}/{len(A )}: {model_shortcut_name} - model_type {model_type}''' ) print("-" * 100 ) if config_shortcut_name in aws_config_map: lowercase__ : List[str]= cached_file(A , A , force_download=not use_cached_models ) else: lowercase__ : Union[str, Any]= config_shortcut_name if model_shortcut_name in aws_model_maps: lowercase__ : str= cached_file(A , A , force_download=not use_cached_models ) else: lowercase__ : Any= model_shortcut_name if os.path.isfile(A ): lowercase__ : Dict= "converted_model" 
convert_pt_checkpoint_to_tf( model_type=A , pytorch_checkpoint_path=A , config_file=A , tf_dump_path=os.path.join(A , model_shortcut_name + "-tf_model.h5" ) , compare_with_pt_model=A , ) if remove_cached_files: os.remove(A ) os.remove(A ) if __name__ == "__main__": a : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file.""" ) parser.add_argument( """--model_type""", default=None, type=str, help=( F"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """ """convert all the models from AWS.""" ), ) parser.add_argument( """--pytorch_checkpoint_path""", default=None, type=str, help=( """Path to the PyTorch checkpoint path or shortcut name to download from AWS. """ """If not given, will download and convert all the checkpoints from AWS.""" ), ) parser.add_argument( """--config_file""", default=None, type=str, help=( """The config json file corresponding to the pre-trained model. \n""" """This specifies the model architecture. 
If not given and """ """--pytorch_checkpoint_path is not given or is a shortcut name """ """use the configuration associated to the shortcut name on the AWS""" ), ) parser.add_argument( """--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions.""" ) parser.add_argument( """--use_cached_models""", action="""store_true""", help="""Use cached models if possible instead of updating to latest checkpoint versions.""", ) parser.add_argument( """--remove_cached_files""", action="""store_true""", help="""Remove pytorch models after conversion (save memory when converting in batches).""", ) parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""") a : List[str] = parser.parse_args() # if args.pytorch_checkpoint_path is not None: # convert_pt_checkpoint_to_tf(args.model_type.lower(), # args.pytorch_checkpoint_path, # args.config_file if args.config_file is not None else args.pytorch_checkpoint_path, # args.tf_dump_path, # compare_with_pt_model=args.compare_with_pt_model, # use_cached_models=args.use_cached_models) # else: convert_all_pt_checkpoints_to_tf( args.model_type.lower() if args.model_type is not None else None, args.tf_dump_path, model_shortcut_names_or_path=[args.pytorch_checkpoint_path] if args.pytorch_checkpoint_path is not None else None, config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None, compare_with_pt_model=args.compare_with_pt_model, use_cached_models=args.use_cached_models, remove_cached_files=args.remove_cached_files, only_convert_finetuned_models=args.only_convert_finetuned_models, )
85
0
"""simple docstring""" import random import unittest import torch from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class __UpperCAmelCase( __A , __A , unittest.TestCase ): __lowerCamelCase = IFInpaintingSuperResolutionPipeline __lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"} __lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} ) __lowerCamelCase = PipelineTesterMixin.required_optional_params - {"latents"} def UpperCAmelCase_ ( self ): '''simple docstring''' return self._get_superresolution_dummy_components() def UpperCAmelCase_ ( self , snake_case__ , snake_case__=0 ): '''simple docstring''' if str(snake_case__ ).startswith("mps" ): lowercase__ : Optional[int]= torch.manual_seed(snake_case__ ) else: lowercase__ : int= torch.Generator(device=snake_case__ ).manual_seed(snake_case__ ) lowercase__ : Tuple= floats_tensor((1, 3, 16, 16) , rng=random.Random(snake_case__ ) ).to(snake_case__ ) lowercase__ : Optional[Any]= floats_tensor((1, 3, 32, 32) , rng=random.Random(snake_case__ ) ).to(snake_case__ ) lowercase__ : List[str]= floats_tensor((1, 3, 32, 32) , rng=random.Random(snake_case__ ) ).to(snake_case__ ) lowercase__ : Optional[Any]= { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'original_image': original_image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def 
UpperCAmelCase_ ( self ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) def UpperCAmelCase_ ( self ): '''simple docstring''' self._test_save_load_optional_components() @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" ) def UpperCAmelCase_ ( self ): '''simple docstring''' super().test_save_load_floataa(expected_max_diff=1e-1 ) def UpperCAmelCase_ ( self ): '''simple docstring''' self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def UpperCAmelCase_ ( self ): '''simple docstring''' self._test_save_load_local() def UpperCAmelCase_ ( self ): '''simple docstring''' self._test_inference_batch_single_identical( expected_max_diff=1e-2 , )
704
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import _LazyModule a : List[str] = {"""processing_wav2vec2_with_lm""": ["""Wav2Vec2ProcessorWithLM"""]} if TYPE_CHECKING: from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM else: import sys a : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
85
0
from statistics import mean, stdev def lowercase__(A , A = 3 ) ->Optional[Any]: """simple docstring""" lowercase__ : Tuple= min(__snake_case ) lowercase__ : List[str]= max(__snake_case ) # normalize data return [round((x - x_min) / (x_max - x_min) , __snake_case ) for x in data] def lowercase__(A , A = 3 ) ->Tuple: """simple docstring""" lowercase__ : str= mean(__snake_case ) lowercase__ : int= stdev(__snake_case ) # standardize data return [round((x - mu) / (sigma) , __snake_case ) for x in data]
705
"""simple docstring""" def lowercase__(A ) ->list: """simple docstring""" if n_term == "": return [] lowercase__ : list= [] for temp in range(int(A ) ): series.append(f'''1/{temp + 1}''' if series else "1" ) return series if __name__ == "__main__": a : Dict = input("""Enter the last number (nth term) of the Harmonic Series""") print("""Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n""") print(harmonic_series(nth_term))
85
0
"""simple docstring""" import argparse import json import os from pathlib import Path import requests import torch from transformers import JukeboxConfig, JukeboxModel from transformers.utils import logging logging.set_verbosity_info() a : List[str] = logging.get_logger(__name__) a : Dict = '''https://openaipublic.azureedge.net/jukebox/models/''' a : Tuple = { '''jukebox-1b-lyrics''': [ '''5b/vqvae.pth.tar''', '''5b/prior_level_0.pth.tar''', '''5b/prior_level_1.pth.tar''', '''1b_lyrics/prior_level_2.pth.tar''', ], '''jukebox-5b-lyrics''': [ '''5b/vqvae.pth.tar''', '''5b/prior_level_0.pth.tar''', '''5b/prior_level_1.pth.tar''', '''5b_lyrics/prior_level_2.pth.tar''', ], } def lowercase__(A ) ->Dict: """simple docstring""" if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 10: lowercase__ : Tuple= key.replace(".model.1.bias" , ".conv1d_1.bias" ) elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 10: lowercase__ : Union[str, Any]= key.replace(".model.1.weight" , ".conv1d_1.weight" ) elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 10: lowercase__ : Union[str, Any]= key.replace(".model.3.bias" , ".conv1d_2.bias" ) elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 10: lowercase__ : str= key.replace(".model.3.weight" , ".conv1d_2.weight" ) if "conditioner_blocks.0." in key: lowercase__ : List[Any]= key.replace("conditioner_blocks.0" , "conditioner_blocks" ) if "prime_prior" in key: lowercase__ : int= key.replace("prime_prior" , "encoder" ) if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key: lowercase__ : str= key.replace(".emb." , "." ) if key.endswith("k" ): # replace vqvae.X.k with vqvae.X.codebook return key.replace(".k" , ".codebook" ) if "y_emb." in key: return key.replace("y_emb." , "metadata_embedding." ) if "x_emb.emb." 
in key: lowercase__ : Dict= key.replace("0.x_emb.emb" , "embed_tokens" ) if "prime_state_ln" in key: return key.replace("prime_state_ln" , "encoder.final_layer_norm" ) if ".ln" in key: return key.replace(".ln" , ".layer_norm" ) if "_ln" in key: return key.replace("_ln" , "_layer_norm" ) if "prime_state_proj" in key: return key.replace("prime_state_proj" , "encoder.proj_in" ) if "prime_x_out" in key: return key.replace("prime_x_out" , "encoder.lm_head" ) if "prior.x_out" in key: return key.replace("x_out" , "fc_proj_out" ) if "x_emb" in key: return key.replace("x_emb" , "embed_tokens" ) return key def lowercase__(A , A , A , A ) ->List[str]: """simple docstring""" lowercase__ : Union[str, Any]= {} import re lowercase__ : Dict= re.compile(R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" ) lowercase__ : Optional[int]= re.compile( R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" ) lowercase__ : Optional[Any]= re.compile(R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" ) lowercase__ : Optional[Any]= re.compile(R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" ) lowercase__ : List[str]= re.compile( R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" ) lowercase__ : Optional[int]= re.compile(R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" ) lowercase__ : Union[str, Any]= re.compile(R"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" ) lowercase__ : str= re.compile( R"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" ) lowercase__ : List[str]= re.compile(R"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" ) for original_key, value in state_dict.items(): # rename vqvae.encoder keys if re_encoder_block_conv_in.fullmatch(lowerCAmelCase__ ): lowercase__ : Any= re_encoder_block_conv_in.match(lowerCAmelCase__ ) lowercase__ : List[str]= regex_match.groups() lowercase__ : Any= 
int(groups[2] ) * 2 + int(groups[3] ) lowercase__ : List[Any]= f'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}''' lowercase__ : Union[str, Any]= re_encoder_block_conv_in.sub(lowerCAmelCase__ , lowerCAmelCase__ ) elif re_encoder_block_resnet.fullmatch(lowerCAmelCase__ ): lowercase__ : Union[str, Any]= re_encoder_block_resnet.match(lowerCAmelCase__ ) lowercase__ : List[str]= regex_match.groups() lowercase__ : Dict= int(groups[2] ) * 2 + int(groups[3] ) lowercase__ : List[Any]= {"1": 1, "3": 2}[groups[-2]] lowercase__ : int= f'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.''' lowercase__ : str= f'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}''' lowercase__ : Any= prefix + resnet_block lowercase__ : str= re_encoder_block_resnet.sub(lowerCAmelCase__ , lowerCAmelCase__ ) elif re_encoder_block_proj_out.fullmatch(lowerCAmelCase__ ): lowercase__ : Optional[Any]= re_encoder_block_proj_out.match(lowerCAmelCase__ ) lowercase__ : int= regex_match.groups() lowercase__ : List[str]= f'''encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}''' lowercase__ : List[str]= re_encoder_block_proj_out.sub(lowerCAmelCase__ , lowerCAmelCase__ ) # rename vqvae.decoder keys elif re_decoder_block_conv_out.fullmatch(lowerCAmelCase__ ): lowercase__ : Optional[int]= re_decoder_block_conv_out.match(lowerCAmelCase__ ) lowercase__ : List[Any]= regex_match.groups() lowercase__ : Optional[Any]= int(groups[2] ) * 2 + int(groups[3] ) - 2 lowercase__ : Dict= f'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}''' lowercase__ : str= re_decoder_block_conv_out.sub(lowerCAmelCase__ , lowerCAmelCase__ ) elif re_decoder_block_resnet.fullmatch(lowerCAmelCase__ ): lowercase__ : List[Any]= re_decoder_block_resnet.match(lowerCAmelCase__ ) lowercase__ : Dict= regex_match.groups() lowercase__ : List[str]= int(groups[2] ) * 2 + int(groups[3] ) - 2 lowercase__ : 
Optional[int]= {"1": 1, "3": 2}[groups[-2]] lowercase__ : Optional[int]= f'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.''' lowercase__ : Optional[Any]= f'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}''' lowercase__ : Union[str, Any]= prefix + resnet_block lowercase__ : Optional[Any]= re_decoder_block_resnet.sub(lowerCAmelCase__ , lowerCAmelCase__ ) elif re_decoder_block_proj_in.fullmatch(lowerCAmelCase__ ): lowercase__ : List[str]= re_decoder_block_proj_in.match(lowerCAmelCase__ ) lowercase__ : Union[str, Any]= regex_match.groups() lowercase__ : Dict= f'''decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}''' lowercase__ : Tuple= re_decoder_block_proj_in.sub(lowerCAmelCase__ , lowerCAmelCase__ ) # rename prior cond.model to upsampler.upsample_block and resnet elif re_prior_cond_conv_out.fullmatch(lowerCAmelCase__ ): lowercase__ : List[str]= re_prior_cond_conv_out.match(lowerCAmelCase__ ) lowercase__ : List[str]= regex_match.groups() lowercase__ : Tuple= int(groups[1] ) * 2 + int(groups[2] ) - 2 lowercase__ : Optional[Any]= f'''conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}''' lowercase__ : Any= re_prior_cond_conv_out.sub(lowerCAmelCase__ , lowerCAmelCase__ ) elif re_prior_cond_resnet.fullmatch(lowerCAmelCase__ ): lowercase__ : Union[str, Any]= re_prior_cond_resnet.match(lowerCAmelCase__ ) lowercase__ : Union[str, Any]= regex_match.groups() lowercase__ : Dict= int(groups[1] ) * 2 + int(groups[2] ) - 2 lowercase__ : List[Any]= {"1": 1, "3": 2}[groups[-2]] lowercase__ : Dict= f'''conditioner_blocks.upsampler.upsample_block.{block_index}.''' lowercase__ : Dict= f'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}''' lowercase__ : List[Any]= prefix + resnet_block lowercase__ : Any= re_prior_cond_resnet.sub(lowerCAmelCase__ , lowerCAmelCase__ ) elif re_prior_cond_proj_in.fullmatch(lowerCAmelCase__ ): lowercase__ : Dict= re_prior_cond_proj_in.match(lowerCAmelCase__ ) 
lowercase__ : Optional[Any]= regex_match.groups() lowercase__ : List[str]= f'''conditioner_blocks.upsampler.proj_in.{groups[-1]}''' lowercase__ : List[Any]= re_prior_cond_proj_in.sub(lowerCAmelCase__ , lowerCAmelCase__ ) # keep original key else: lowercase__ : List[str]= original_key lowercase__ : List[Any]= replace_key(lowerCAmelCase__ ) if f'''{key_prefix}.{key}''' not in model_state_dict or key is None: print(f'''failed converting {original_key} to {key}, does not match''' ) # handle missmatched shape elif value.shape != model_state_dict[f'''{key_prefix}.{key}'''].shape: lowercase__ : int= model_state_dict[f'''{key_prefix}.{key}'''] print(f'''{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match''' ) lowercase__ : Optional[int]= original_key lowercase__ : Dict= original_key lowercase__ : str= value return new_dict @torch.no_grad() def lowercase__(A=None , A=None ) ->Any: """simple docstring""" for file in MODEL_MAPPING[model_name]: if not os.path.isfile(f'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' ): lowercase__ : Union[str, Any]= requests.get(f'''{PREFIX}{file}''' , allow_redirects=lowerCAmelCase__ ) os.makedirs(f'''{pytorch_dump_folder_path}/''' , exist_ok=lowerCAmelCase__ ) open(f'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' , "wb" ).write(r.content ) lowercase__ : str= MODEL_MAPPING[model_name.split("/" )[-1]] lowercase__ : Optional[int]= JukeboxConfig.from_pretrained(lowerCAmelCase__ ) lowercase__ : str= JukeboxModel(lowerCAmelCase__ ) lowercase__ : List[Any]= [] lowercase__ : Union[str, Any]= {} for i, dict_name in enumerate(lowerCAmelCase__ ): lowercase__ : Any= torch.load(f'''{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}''' )["model"] lowercase__ : List[Any]= {} for k in old_dic.keys(): if k.endswith(".b" ): lowercase__ : Union[str, Any]= old_dic[k] elif k.endswith(".w" ): lowercase__ : str= old_dic[k] elif "level_2" not in dict_name and "cond.model." 
in k: lowercase__ : Dict= old_dic[k] else: lowercase__ : Union[str, Any]= old_dic[k] lowercase__ : int= "vqvae" if i == 0 else f'''priors.{3 - i}''' lowercase__ : Any= fix_jukebox_keys(lowerCAmelCase__ , model.state_dict() , lowerCAmelCase__ , lowerCAmelCase__ ) weight_dict.append(lowerCAmelCase__ ) lowercase__ : List[str]= weight_dict.pop(0 ) model.vqvae.load_state_dict(lowerCAmelCase__ ) for i in range(len(lowerCAmelCase__ ) ): model.priors[i].load_state_dict(weight_dict[2 - i] ) Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ ) with open(f'''{pytorch_dump_folder_path}/mapping.json''' , "w" ) as txtfile: json.dump(lowerCAmelCase__ , lowerCAmelCase__ ) print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowerCAmelCase__ ) return weight_dict if __name__ == "__main__": a : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""jukebox-5b-lyrics""", type=str, help="""Name of the model you\'d like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""jukebox-5b-lyrics-converted""", type=str, help="""Path to the output PyTorch model directory.""", ) a : List[Any] = parser.parse_args() convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
706
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a : int = logging.get_logger(__name__) a : str = { """google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""", """google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""", """google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""", # See all BigBird models at https://huggingface.co/models?filter=big_bird } class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = "big_bird" def __init__( self , snake_case__=50358 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu_new" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=4096 , snake_case__=2 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=True , snake_case__=0 , snake_case__=1 , snake_case__=2 , snake_case__=66 , snake_case__="block_sparse" , snake_case__=True , snake_case__=False , snake_case__=64 , snake_case__=3 , snake_case__=None , **snake_case__ , ): '''simple docstring''' super().__init__( pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , sep_token_id=snake_case__ , **snake_case__ , ) lowercase__ : Dict= vocab_size lowercase__ : Optional[int]= max_position_embeddings lowercase__ : List[Any]= hidden_size lowercase__ : List[str]= num_hidden_layers lowercase__ : List[str]= num_attention_heads lowercase__ : Optional[int]= intermediate_size lowercase__ : Optional[int]= hidden_act lowercase__ : Tuple= hidden_dropout_prob lowercase__ : int= attention_probs_dropout_prob lowercase__ : int= initializer_range lowercase__ : List[Any]= type_vocab_size lowercase__ : Union[str, Any]= layer_norm_eps lowercase__ : Optional[Any]= 
use_cache lowercase__ : Union[str, Any]= rescale_embeddings lowercase__ : Union[str, Any]= attention_type lowercase__ : Any= use_bias lowercase__ : List[Any]= block_size lowercase__ : Optional[Any]= num_random_blocks lowercase__ : Optional[int]= classifier_dropout class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" @property def UpperCAmelCase_ ( self ): '''simple docstring''' if self.task == "multiple-choice": lowercase__ : List[Any]= {0: "batch", 1: "choice", 2: "sequence"} else: lowercase__ : Tuple= {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
85
0
"""simple docstring""" a : int = """Alexander Joslin""" import operator as op from .stack import Stack def lowercase__(A ) ->str: """simple docstring""" lowercase__ : str= {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub} lowercase__ : Stack[int]= Stack() lowercase__ : Stack[str]= Stack() for i in equation: if i.isdigit(): # RULE 1 operand_stack.push(int(lowerCAmelCase_ ) ) elif i in operators: # RULE 2 operator_stack.push(lowerCAmelCase_ ) elif i == ")": # RULE 4 lowercase__ : Any= operator_stack.peek() operator_stack.pop() lowercase__ : Dict= operand_stack.peek() operand_stack.pop() lowercase__ : Dict= operand_stack.peek() operand_stack.pop() lowercase__ : Optional[Any]= operators[opr](lowerCAmelCase_ , lowerCAmelCase_ ) operand_stack.push(lowerCAmelCase_ ) # RULE 5 return operand_stack.peek() if __name__ == "__main__": a : List[str] = """(5 + ((4 * 2) * (2 + 3)))""" # answer = 45 print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
707
"""simple docstring""" from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
85
0
from pathlib import Path import torch from ...utils import is_npu_available, is_xpu_available from .config_args import ClusterConfig, default_json_config_file from .config_utils import SubcommandHelpFormatter a : str = """Create a default config file for Accelerate with only a few flags set.""" def lowercase__(A="no" , A = default_json_config_file , A = False ) ->Dict: """simple docstring""" lowercase__ : List[Any]= Path(_snake_case ) path.parent.mkdir(parents=_snake_case , exist_ok=_snake_case ) if path.exists(): print( f'''Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.''' ) return False lowercase__ : Any= mixed_precision.lower() if mixed_precision not in ["no", "fp16", "bf16", "fp8"]: raise ValueError( f'''`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}''' ) lowercase__ : List[str]= { "compute_environment": "LOCAL_MACHINE", "mixed_precision": mixed_precision, } if torch.cuda.is_available(): lowercase__ : Any= torch.cuda.device_count() lowercase__ : Any= num_gpus lowercase__ : Optional[Any]= False if num_gpus > 1: lowercase__ : Optional[int]= "MULTI_GPU" else: lowercase__ : List[Any]= "NO" elif is_xpu_available() and use_xpu: lowercase__ : Optional[int]= torch.xpu.device_count() lowercase__ : int= num_xpus lowercase__ : Dict= False if num_xpus > 1: lowercase__ : Tuple= "MULTI_XPU" else: lowercase__ : int= "NO" elif is_npu_available(): lowercase__ : int= torch.npu.device_count() lowercase__ : Dict= num_npus lowercase__ : Optional[Any]= False if num_npus > 1: lowercase__ : Tuple= "MULTI_NPU" else: lowercase__ : str= "NO" else: lowercase__ : Optional[int]= 0 lowercase__ : Dict= True lowercase__ : int= 1 lowercase__ : str= "NO" lowercase__ : List[Any]= ClusterConfig(**_snake_case ) config.to_json_file(_snake_case ) return path def lowercase__(A , A ) ->List[str]: """simple docstring""" lowercase__ : Any= 
parser.add_parser("default" , parents=_snake_case , help=_snake_case , formatter_class=_snake_case ) parser.add_argument( "--config_file" , default=_snake_case , help=( "The path to use to store the config file. Will default to a file named default_config.yaml in the cache " "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have " "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed " "with 'huggingface'." ) , dest="save_location" , ) parser.add_argument( "--mixed_precision" , choices=["no", "fp16", "bf16"] , type=_snake_case , help="Whether or not to use mixed precision training. " "Choose between FP16 and BF16 (bfloat16) training. " "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later." , default="no" , ) parser.set_defaults(func=_snake_case ) return parser def lowercase__(A ) ->Optional[Any]: """simple docstring""" lowercase__ : Optional[int]= write_basic_config(args.mixed_precision , args.save_location ) if config_file: print(f'''accelerate configuration saved at {config_file}''' )
708
"""simple docstring""" from unittest.mock import patch import pyspark from datasets.packaged_modules.spark.spark import ( Spark, SparkExamplesIterable, _generate_iterable_examples, ) from ..utils import ( require_dill_gt_0_3_2, require_not_windows, ) def lowercase__(A , A ) ->List[Any]: """simple docstring""" lowercase__ : str= [] for part_id in partition_order: lowercase__ : int= df.where(f'''SPARK_PARTITION_ID() = {part_id}''' ).collect() for row_idx, row in enumerate(A ): expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()) ) return expected_row_ids_and_row_dicts @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->str: """simple docstring""" lowercase__ : Optional[Any]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Tuple= spark.range(100 ).repartition(1 ) lowercase__ : Dict= Spark(A ) # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means # that each partition can hold 2 rows. spark_builder._repartition_df_if_needed(max_shard_size=16 ) # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions. assert spark_builder.df.rdd.getNumPartitions() == 50 @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->Tuple: """simple docstring""" lowercase__ : Union[str, Any]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Dict= spark.range(10 ).repartition(2 ) lowercase__ : Optional[Any]= [1, 0] lowercase__ : List[str]= _generate_iterable_examples(A , A ) # Reverse the partitions. 
lowercase__ : int= _get_expected_row_ids_and_row_dicts_for_partition_order(A , A ) for i, (row_id, row_dict) in enumerate(generate_fn() ): lowercase__, lowercase__ : Any= expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->int: """simple docstring""" lowercase__ : int= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Dict= spark.range(10 ).repartition(1 ) lowercase__ : str= SparkExamplesIterable(A ) assert it.n_shards == 1 for i, (row_id, row_dict) in enumerate(A ): assert row_id == f'''0_{i}''' assert row_dict == {"id": i} @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->str: """simple docstring""" lowercase__ : List[str]= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : int= spark.range(30 ).repartition(3 ) # Mock the generator so that shuffle reverses the partition indices. 
with patch("numpy.random.Generator" ) as generator_mock: lowercase__ : Optional[Any]= lambda A : x.reverse() lowercase__ : Tuple= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [2, 1, 0] ) lowercase__ : List[str]= SparkExamplesIterable(A ).shuffle_data_sources(A ) assert shuffled_it.n_shards == 3 for i, (row_id, row_dict) in enumerate(A ): lowercase__, lowercase__ : str= expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->Any: """simple docstring""" lowercase__ : Dict= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Union[str, Any]= spark.range(20 ).repartition(4 ) # Partitions 0 and 2 lowercase__ : Optional[int]= SparkExamplesIterable(A ).shard_data_sources(worker_id=0 , num_workers=2 ) assert shard_it_a.n_shards == 2 lowercase__ : Union[str, Any]= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [0, 2] ) for i, (row_id, row_dict) in enumerate(A ): lowercase__, lowercase__ : Tuple= expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict # Partitions 1 and 3 lowercase__ : Tuple= SparkExamplesIterable(A ).shard_data_sources(worker_id=1 , num_workers=2 ) assert shard_it_a.n_shards == 2 lowercase__ : List[Any]= _get_expected_row_ids_and_row_dicts_for_partition_order(A , [1, 3] ) for i, (row_id, row_dict) in enumerate(A ): lowercase__, lowercase__ : Dict= expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def lowercase__() ->Tuple: """simple docstring""" lowercase__ : Any= pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() lowercase__ : Tuple= spark.range(100 ).repartition(1 ) lowercase__ : Optional[int]= Spark(A ) # Choose a small max_shard_size for maximum partitioning. 
spark_builder._repartition_df_if_needed(max_shard_size=1 ) # The new number of partitions should not be greater than the number of rows. assert spark_builder.df.rdd.getNumPartitions() == 100
85
0
import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def lowercase__(A , A ) ->str: """simple docstring""" lowercase__ : Union[str, Any]= args.log_outputs lowercase__ : Tuple= "_".join(args.dataset.split("/" ) + [args.config, args.split] ) # load metric lowercase__ : Optional[int]= load_metric("wer" ) lowercase__ : int= load_metric("cer" ) # compute metrics lowercase__ : int= wer.compute(references=result["target"] , predictions=result["prediction"] ) lowercase__ : Tuple= cer.compute(references=result["target"] , predictions=result["prediction"] ) # print & log results lowercase__ : str= f'''WER: {wer_result}\nCER: {cer_result}''' print(_A ) with open(f'''{dataset_id}_eval_results.txt''' , "w" ) as f: f.write(_A ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: lowercase__ : List[str]= f'''log_{dataset_id}_predictions.txt''' lowercase__ : List[str]= f'''log_{dataset_id}_targets.txt''' with open(_A , "w" ) as p, open(_A , "w" ) as t: # mapping function to write output def write_to_file(A , A ): p.write(f'''{i}''' + "\n" ) p.write(batch["prediction"] + "\n" ) t.write(f'''{i}''' + "\n" ) t.write(batch["target"] + "\n" ) result.map(_A , with_indices=_A ) def lowercase__(A ) ->str: """simple docstring""" lowercase__ : str= "[,?.!\-\;\:\"“%‘”�—’…–]" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training lowercase__ : Any= re.sub(_A , "" , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! 
lowercase__ : List[str]= ["\n\n", "\n", " ", " "] for t in token_sequences_to_ignore: lowercase__ : str= " ".join(text.split(_A ) ) return text def lowercase__(A ) ->Tuple: """simple docstring""" lowercase__ : List[str]= load_dataset(args.dataset , args.config , split=args.split , use_auth_token=_A ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor lowercase__ : List[Any]= AutoFeatureExtractor.from_pretrained(args.model_id ) lowercase__ : Optional[Any]= feature_extractor.sampling_rate # resample audio lowercase__ : Optional[int]= dataset.cast_column("audio" , Audio(sampling_rate=_A ) ) # load eval pipeline if args.device is None: lowercase__ : Optional[Any]= 0 if torch.cuda.is_available() else -1 lowercase__ : Dict= pipeline("automatic-speech-recognition" , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(A ): lowercase__ : Union[str, Any]= asr( batch["audio"]["array"] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) lowercase__ : Any= prediction["text"] lowercase__ : Tuple= normalize_text(batch["sentence"] ) return batch # run inference on all examples lowercase__ : Any= dataset.map(_A , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(_A , _A ) if __name__ == "__main__": a : Union[str, Any] = argparse.ArgumentParser() parser.add_argument( """--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers""" ) parser.add_argument( """--dataset""", type=str, required=True, help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""", ) parser.add_argument( """--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `\'en\'` for Common Voice""" ) parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. 
*E.g.* `\'test\'`""") parser.add_argument( """--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds.""" ) parser.add_argument( """--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second.""" ) parser.add_argument( """--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis.""" ) parser.add_argument( """--device""", type=int, default=None, help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""", ) a : Dict = parser.parse_args() main(args)
709
"""simple docstring""" import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=False , snake_case__=False , snake_case__=False , snake_case__=2 , snake_case__=99 , snake_case__=0 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=12 , snake_case__=2 , snake_case__=0.02 , snake_case__=3 , snake_case__=4 , snake_case__="last" , snake_case__=None , snake_case__=None , ): '''simple docstring''' lowercase__ : Optional[int]= parent lowercase__ : Tuple= batch_size lowercase__ : Tuple= seq_length lowercase__ : str= is_training lowercase__ : str= use_input_lengths lowercase__ : Any= use_token_type_ids lowercase__ : List[Any]= use_labels lowercase__ : Optional[int]= gelu_activation lowercase__ : str= sinusoidal_embeddings lowercase__ : List[str]= causal lowercase__ : Any= asm lowercase__ : Optional[int]= n_langs lowercase__ : Union[str, Any]= vocab_size lowercase__ : int= n_special lowercase__ : Any= hidden_size lowercase__ : int= num_hidden_layers lowercase__ : 
List[str]= num_attention_heads lowercase__ : List[str]= hidden_dropout_prob lowercase__ : str= attention_probs_dropout_prob lowercase__ : Any= max_position_embeddings lowercase__ : List[Any]= type_vocab_size lowercase__ : int= type_sequence_label_size lowercase__ : Any= initializer_range lowercase__ : Optional[int]= num_labels lowercase__ : Union[str, Any]= num_choices lowercase__ : List[Any]= summary_type lowercase__ : Optional[int]= use_proj lowercase__ : int= scope def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__ : Dict= random_attention_mask([self.batch_size, self.seq_length] ) lowercase__ : Tuple= None if self.use_input_lengths: lowercase__ : List[Any]= ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length lowercase__ : Tuple= None if self.use_token_type_ids: lowercase__ : Any= ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) lowercase__ : str= None lowercase__ : Tuple= None lowercase__ : Dict= None if self.use_labels: lowercase__ : Optional[Any]= ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ : Optional[Any]= ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase__ : Tuple= ids_tensor([self.batch_size] , 2 ).float() lowercase__ : Tuple= ids_tensor([self.batch_size] , self.num_choices ) lowercase__ : List[Any]= self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def UpperCAmelCase_ ( self ): '''simple docstring''' return FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , 
sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : Any= FlaubertModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : str= model(snake_case__ , lengths=snake_case__ , langs=snake_case__ ) lowercase__ : str= model(snake_case__ , langs=snake_case__ ) lowercase__ : Any= model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : str= FlaubertWithLMHeadModel(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : Optional[Any]= model(snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : str= FlaubertForQuestionAnsweringSimple(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : List[str]= model(snake_case__ ) lowercase__ : Dict= model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) 
) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : List[Any]= FlaubertForQuestionAnswering(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : Dict= model(snake_case__ ) lowercase__ : Any= model( snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , p_mask=snake_case__ , ) lowercase__ : List[str]= model( snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , ) ((lowercase__), ) : Optional[Any]= result_with_labels.to_tuple() lowercase__ : Union[str, Any]= model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ ) ((lowercase__), ) : List[Any]= result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : List[str]= FlaubertForSequenceClassification(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : Optional[Any]= model(snake_case__ ) lowercase__ : Optional[Any]= model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.loss.shape , () ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : List[Any]= self.num_labels lowercase__ : Union[str, Any]= FlaubertForTokenClassification(snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : int= model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' lowercase__ : int= self.num_choices lowercase__ : str= FlaubertForMultipleChoice(config=snake_case__ ) model.to(snake_case__ ) model.eval() lowercase__ : Dict= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ : int= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ : str= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ : Any= model( snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[Any]= self.prepare_config_and_inputs() ( ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ) : Any= config_and_inputs lowercase__ : Tuple= { "input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths, "attention_mask": input_mask, } return config, inputs_dict @require_torch class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , 
SCREAMING_SNAKE_CASE__ , unittest.TestCase ): """simple docstring""" __lowerCamelCase = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) __lowerCamelCase = ( { "feature-extraction": FlaubertModel, "fill-mask": FlaubertWithLMHeadModel, "question-answering": FlaubertForQuestionAnsweringSimple, "text-classification": FlaubertForSequenceClassification, "token-classification": FlaubertForTokenClassification, "zero-shot": FlaubertForSequenceClassification, } if is_torch_available() else {} ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__=False ): '''simple docstring''' lowercase__ : Tuple= super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": lowercase__ : List[Any]= torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=snake_case__ ) lowercase__ : List[str]= torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=snake_case__ ) return inputs_dict def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= FlaubertModelTester(self ) lowercase__ : List[str]= ConfigTester(self , config_class=snake_case__ , emb_dim=37 ) def UpperCAmelCase_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Any= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[int]= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Tuple= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[str]= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Union[str, Any]= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*snake_case__ ) def UpperCAmelCase_ ( self ): 
'''simple docstring''' lowercase__ : Tuple= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*snake_case__ ) @slow def UpperCAmelCase_ ( self ): '''simple docstring''' for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : List[str]= FlaubertModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) @slow @require_torch_gpu def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__, lowercase__ : Optional[Any]= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. if model_class == FlaubertForMultipleChoice: return lowercase__ : int= True lowercase__ : List[Any]= model_class(config=snake_case__ ) lowercase__ : str= self._prepare_for_class(snake_case__ , snake_case__ ) lowercase__ : Dict= torch.jit.trace( snake_case__ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(snake_case__ , os.path.join(snake_case__ , "traced_model.pt" ) ) lowercase__ : str= torch.jit.load(os.path.join(snake_case__ , "traced_model.pt" ) , map_location=snake_case__ ) loaded(inputs_dict["input_ids"].to(snake_case__ ) , inputs_dict["attention_mask"].to(snake_case__ ) ) @require_torch class __UpperCAmelCase( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= FlaubertModel.from_pretrained("flaubert/flaubert_base_cased" ) lowercase__ : Tuple= torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) with torch.no_grad(): lowercase__ : Optional[int]= model(snake_case__ )[0] lowercase__ : Optional[int]= torch.Size((1, 11, 768) 
) self.assertEqual(output.shape , snake_case__ ) lowercase__ : Dict= torch.tensor( [[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1e-4 ) )
85
0
"""simple docstring""" import os import shutil import sys import tempfile import unittest from pathlib import Path import pytest import transformers from transformers import ( BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoTokenizer, BertConfig, BertTokenizer, BertTokenizerFast, CTRLTokenizer, GPTaTokenizer, GPTaTokenizerFast, PreTrainedTokenizerFast, RobertaTokenizer, RobertaTokenizerFast, is_tokenizers_available, ) from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.auto.tokenization_auto import ( TOKENIZER_MAPPING, get_tokenizer_config, tokenizer_class_from_name, ) from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import ( DUMMY_DIFF_TOKENIZER_IDENTIFIER, DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tokenizers, slow, ) sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class __UpperCAmelCase( unittest.TestCase ): """simple docstring""" def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[int]= 0 @slow def UpperCAmelCase_ ( self ): '''simple docstring''' for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x): lowercase__ : List[Any]= AutoTokenizer.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , (BertTokenizer, BertTokenizerFast) ) self.assertGreater(len(UpperCamelCase__ ) , 0 ) for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys(): lowercase__ : int= AutoTokenizer.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , (GPTaTokenizer, 
GPTaTokenizerFast) ) self.assertGreater(len(UpperCamelCase__ ) , 0 ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[int]= AutoTokenizer.from_pretrained(UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 12 ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[Any]= AutoTokenizer.from_pretrained(UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , (RobertaTokenizer, RobertaTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 20 ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[Any]= AutoConfig.from_pretrained(UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) # Check that tokenizer_type ≠ model_type lowercase__ : Dict= AutoTokenizer.from_pretrained(UpperCamelCase__ , config=UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 12 ) def UpperCAmelCase_ ( self ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("./tests/fixtures/vocab.txt" , os.path.join(UpperCamelCase__ , "vocab.txt" ) ) lowercase__ : int= AutoTokenizer.from_pretrained(UpperCamelCase__ , tokenizer_type="bert" , use_fast=UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("./tests/fixtures/vocab.json" , os.path.join(UpperCamelCase__ , "vocab.json" ) ) shutil.copy("./tests/fixtures/merges.txt" , os.path.join(UpperCamelCase__ , "merges.txt" ) ) lowercase__ : Tuple= AutoTokenizer.from_pretrained(UpperCamelCase__ , tokenizer_type="gpt2" , use_fast=UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) @require_tokenizers def UpperCAmelCase_ ( self ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("./tests/fixtures/vocab.txt" , 
os.path.join(UpperCamelCase__ , "vocab.txt" ) ) lowercase__ : List[str]= AutoTokenizer.from_pretrained(UpperCamelCase__ , tokenizer_type="bert" ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("./tests/fixtures/vocab.json" , os.path.join(UpperCamelCase__ , "vocab.json" ) ) shutil.copy("./tests/fixtures/merges.txt" , os.path.join(UpperCamelCase__ , "merges.txt" ) ) lowercase__ : List[str]= AutoTokenizer.from_pretrained(UpperCamelCase__ , tokenizer_type="gpt2" ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' with pytest.raises(UpperCamelCase__ ): AutoTokenizer.from_pretrained("./" , tokenizer_type="xxx" ) @require_tokenizers def UpperCAmelCase_ ( self ): '''simple docstring''' for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: lowercase__ : Optional[int]= tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased" ) self.assertIsInstance(UpperCamelCase__ , (BertTokenizer, BertTokenizerFast) ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ): self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , UpperCamelCase__ ) else: self.assertEqual(tokenizer.do_lower_case , UpperCamelCase__ ) self.assertEqual(tokenizer.model_max_length , 512 ) @require_tokenizers def UpperCAmelCase_ ( self ): '''simple docstring''' for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: with self.assertRaisesRegex( UpperCamelCase__ , "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier" , ): lowercase__ : List[Any]= tokenizer_class.from_pretrained("julien-c/herlolip-not-exists" ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[Any]= TOKENIZER_MAPPING.values() lowercase__ : Union[str, Any]= [] for slow_tok, fast_tok in tokenizers: if slow_tok is not None: tokenizer_names.append(slow_tok.__name__ ) if fast_tok is not None: 
tokenizer_names.append(fast_tok.__name__ ) for tokenizer_name in tokenizer_names: # must find the right class tokenizer_class_from_name(UpperCamelCase__ ) @require_tokenizers def UpperCAmelCase_ ( self ): '''simple docstring''' self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased" , use_fast=UpperCamelCase__ ) , UpperCamelCase__ ) self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased" ) , UpperCamelCase__ ) @require_tokenizers def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[Any]= AutoTokenizer.from_pretrained("distilbert-base-uncased" , do_lower_case=UpperCamelCase__ ) lowercase__ : Optional[Any]= "Hello, world. How are you?" lowercase__ : Optional[Any]= tokenizer.tokenize(UpperCamelCase__ ) self.assertEqual("[UNK]" , tokens[0] ) lowercase__ : Optional[int]= AutoTokenizer.from_pretrained("microsoft/mpnet-base" , do_lower_case=UpperCamelCase__ ) lowercase__ : Dict= tokenizer.tokenize(UpperCamelCase__ ) self.assertEqual("[UNK]" , tokens[0] ) @require_tokenizers def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : str= AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config" ) self.assertEqual(type(UpperCamelCase__ ) , UpperCamelCase__ ) self.assertEqual(tokenizer.model_max_length , 512 ) self.assertEqual(tokenizer.vocab_size , 30000 ) self.assertEqual(tokenizer.unk_token , "[UNK]" ) self.assertEqual(tokenizer.padding_side , "right" ) self.assertEqual(tokenizer.truncation_side , "right" ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Any= AutoTokenizer.from_pretrained(UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , (BertTokenizer, BertTokenizerFast) ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(UpperCamelCase__ ) lowercase__ : str= AutoTokenizer.from_pretrained(UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , tokenizer.__class__ ) self.assertEqual(tokenizera.vocab_size , 12 ) def UpperCAmelCase_ ( self 
): '''simple docstring''' lowercase__ : List[str]= AutoTokenizer.from_pretrained("ctrl" ) # There is no fast CTRL so this always gives us a slow tokenizer. self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Union[str, Any]= get_tokenizer_config("bert-base-cased" ) lowercase__ : int= config.pop("_commit_hash" , UpperCamelCase__ ) # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated. self.assertEqual(UpperCamelCase__ , {"do_lower_case": False} ) # This model does not have a tokenizer_config so we get back an empty dict. lowercase__ : Dict= get_tokenizer_config(UpperCamelCase__ ) self.assertDictEqual(UpperCamelCase__ , {} ) # A tokenizer saved with `save_pretrained` always creates a tokenizer config. lowercase__ : List[Any]= AutoTokenizer.from_pretrained(UpperCamelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(UpperCamelCase__ ) lowercase__ : Optional[int]= get_tokenizer_config(UpperCamelCase__ ) # Check the class of the tokenizer was properly saved (note that it always saves the slow class). 
# ---------------------------------------------------------------------------
# NOTE(review): this chunk is a row of a code-style dataset dump (see the
# `code` / `code_codestyle` / `style_context` columns in the file header).
# A mechanical obfuscation pass renamed identifiers (UpperCAmelCase_,
# lowercase__, UpperCamelCase__) and collapsed newlines, so several
# statements are fused onto each physical line and assignment targets no
# longer match their later uses (e.g. `lowercase__ = CustomTokenizer...`
# followed by `tokenizer.save_pretrained(...)`).  The code is preserved
# byte-for-byte below; comments only record the apparent intent.
#
# Apparent intent: AutoTokenizer tests covering (1) registering a custom
# slow/fast tokenizer pair and the error raised when re-registering an
# existing one, (2) loading dynamic tokenizers from the Hub with
# trust_remote_code and reloading them after save_pretrained, and
# (3) preferring a local registered class over remote code — presumably
# derived from transformers' tests/models/auto/test_tokenization_auto.py;
# confirm against upstream before relying on any detail.
# ---------------------------------------------------------------------------
self.assertEqual(config["tokenizer_class"] , "BertTokenizer" ) def UpperCAmelCase_ ( self ): '''simple docstring''' try: AutoConfig.register("custom" , UpperCamelCase__ ) AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(UpperCamelCase__ ): AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ ) lowercase__ : Optional[Any]= CustomTokenizer.from_pretrained(UpperCamelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(UpperCamelCase__ ) lowercase__ : List[str]= AutoTokenizer.from_pretrained(UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] @require_tokenizers def UpperCAmelCase_ ( self ): '''simple docstring''' try: AutoConfig.register("custom" , UpperCamelCase__ ) # Can register in two steps AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) ) AutoTokenizer.register(UpperCamelCase__ , fast_tokenizer_class=UpperCamelCase__ ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) del TOKENIZER_MAPPING._extra_content[CustomConfig] # Can register in one step AutoTokenizer.register( UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ , fast_tokenizer_class=UpperCamelCase__ ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(UpperCamelCase__ ): AutoTokenizer.register(UpperCamelCase__ , fast_tokenizer_class=UpperCamelCase__ ) # We pass through a bert
tokenizer fast cause there is no converter slow to fast for our new toknizer # and that model does not have a tokenizer.json with tempfile.TemporaryDirectory() as tmp_dir: lowercase__ : List[str]= BertTokenizerFast.from_pretrained(UpperCamelCase__ ) bert_tokenizer.save_pretrained(UpperCamelCase__ ) lowercase__ : List[str]= CustomTokenizerFast.from_pretrained(UpperCamelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(UpperCamelCase__ ) lowercase__ : Any= AutoTokenizer.from_pretrained(UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) lowercase__ : List[Any]= AutoTokenizer.from_pretrained(UpperCamelCase__ , use_fast=UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def UpperCAmelCase_ ( self ): '''simple docstring''' with self.assertRaises(UpperCamelCase__ ): lowercase__ : Optional[Any]= AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" ) # If remote code is disabled, we can't load this config. with self.assertRaises(UpperCamelCase__ ): lowercase__ : int= AutoTokenizer.from_pretrained( "hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=UpperCamelCase__ ) lowercase__ : Optional[Any]= AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=UpperCamelCase__ ) self.assertTrue(tokenizer.special_attribute_present ) # Test tokenizer can be reloaded.
# (above: fast-only custom tokenizer registration round-trip through temp
#  dirs; below: trust_remote_code fast/slow reload checks, local-vs-remote
#  precedence tests, legacy dynamic tokenizer, error messages, and request
#  counting on a cached from_pretrained)
with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(UpperCamelCase__ ) lowercase__ : Union[str, Any]= AutoTokenizer.from_pretrained(UpperCamelCase__ , trust_remote_code=UpperCamelCase__ ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizerFast" ) # Test we can also load the slow version lowercase__ : int= AutoTokenizer.from_pretrained( "hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=UpperCamelCase__ , use_fast=UpperCamelCase__ ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" ) # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(UpperCamelCase__ ) lowercase__ : List[Any]= AutoTokenizer.from_pretrained(UpperCamelCase__ , trust_remote_code=UpperCamelCase__ , use_fast=UpperCamelCase__ ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizer" ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) else: self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizer" ) @require_tokenizers def UpperCAmelCase_ ( self ): '''simple docstring''' class __UpperCAmelCase( _A ): """simple docstring""" __lowerCamelCase = False class __UpperCAmelCase( _A ): """simple docstring""" __lowerCamelCase = NewTokenizer __lowerCamelCase = False try: AutoConfig.register("custom" , UpperCamelCase__ ) AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ ) AutoTokenizer.register(UpperCamelCase__ , fast_tokenizer_class=UpperCamelCase__ ) # If remote code is not set, the default is to use local lowercase__ : Optional[Any]= AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" ) self.assertFalse(tokenizer.special_attribute_present ) lowercase__ : str= AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" , use_fast=UpperCamelCase__ ) self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" ) self.assertFalse(tokenizer.special_attribute_present ) # If remote code is disabled, we load the local one. lowercase__ : Optional[Any]= AutoTokenizer.from_pretrained( "hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=UpperCamelCase__ ) self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" ) self.assertFalse(tokenizer.special_attribute_present ) lowercase__ : Optional[int]= AutoTokenizer.from_pretrained( "hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=UpperCamelCase__ , use_fast=UpperCamelCase__ ) self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" ) self.assertFalse(tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub lowercase__ : int= AutoTokenizer.from_pretrained( "hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=UpperCamelCase__ ) self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" ) self.assertTrue(tokenizer.special_attribute_present ) lowercase__ : Tuple= AutoTokenizer.from_pretrained( "hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=UpperCamelCase__ , use_fast=UpperCamelCase__ ) self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" ) self.assertTrue(tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Union[str, Any]= AutoTokenizer.from_pretrained( "hf-internal-testing/test_dynamic_tokenizer_legacy" , trust_remote_code=UpperCamelCase__ )
self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" ) # Test we can also load the slow version lowercase__ : List[str]= AutoTokenizer.from_pretrained( "hf-internal-testing/test_dynamic_tokenizer_legacy" , trust_remote_code=UpperCamelCase__ , use_fast=UpperCamelCase__ ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" ) else: self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" ) def UpperCAmelCase_ ( self ): '''simple docstring''' with self.assertRaisesRegex( UpperCamelCase__ , "bert-base is not a local folder and is not a valid model identifier" ): lowercase__ : List[Any]= AutoTokenizer.from_pretrained("bert-base" ) def UpperCAmelCase_ ( self ): '''simple docstring''' with self.assertRaisesRegex( UpperCamelCase__ , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): lowercase__ : Tuple= AutoTokenizer.from_pretrained(UpperCamelCase__ , revision="aaaaaa" ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : int= AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" ) with RequestCounter() as counter: lowercase__ : List[Any]= AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
710
# ---------------------------------------------------------------------------
# NOTE(review): dataset-dump row; identifiers were mechanically renamed and
# newlines collapsed, so the code below is not executable as written.  Kept
# byte-for-byte; comments only record the apparent intent.
#
# Apparent intent: the diffusers KarrasVe stochastic scheduler (an output
# dataclass with prev_sample/derivative/pred_original_sample fields plus a
# SchedulerMixin/ConfigMixin subclass) — presumably
# diffusers/schedulers/scheduling_karras_ve.py; confirm against upstream.
# Corruption to be aware of when restoring:
#   * every method was renamed to the same `UpperCAmelCase_`, so as plain
#     Python only the last definition would survive on the class;
#   * `lowercase__ : int= sigma_max` etc. in __init__/set_timesteps were
#     presumably `self.<attr> = ...` assignments (init_noise_sigma,
#     num_inference_steps, timesteps, schedule) — as written the values are
#     computed and discarded into locals; verify names against upstream
#     before fixing.
#   * step/step_correct compute gamma from s_churn only when sigma lies in
#     [s_min, s_max], add s_noise-scaled gaussian noise, and take an
#     Euler / Heun-style corrected step (Karras et al. 2022, Algorithm 2).
# ---------------------------------------------------------------------------
"""simple docstring""" from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = 42 __lowerCamelCase = 42 __lowerCamelCase = None class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = 2 @register_to_config def __init__( self , snake_case__ = 0.02 , snake_case__ = 100 , snake_case__ = 1.0_07 , snake_case__ = 80 , snake_case__ = 0.05 , snake_case__ = 50 , ): '''simple docstring''' # standard deviation of the initial noise distribution lowercase__ : int= sigma_max # setable values lowercase__ : int= None lowercase__ : np.IntTensor= None lowercase__ : torch.FloatTensor= None # sigma(t_i) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' return sample def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' lowercase__ : List[Any]= num_inference_steps lowercase__ : Any= np.arange(0 , self.num_inference_steps )[::-1].copy() lowercase__ : Tuple= torch.from_numpy(snake_case__ ).to(snake_case__ ) lowercase__ : Union[str, Any]= [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in self.timesteps ] lowercase__ : int= torch.tensor(snake_case__ , dtype=torch.floataa , device=snake_case__ ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ = None ): '''simple docstring''' if self.config.s_min <= sigma <= self.config.s_max: lowercase__ : Optional[Any]= min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 ) else: lowercase__ : str= 0 # sample eps ~ N(0, S_noise^2 * I) lowercase__ : List[Any]= self.config.s_noise *
randn_tensor(sample.shape , generator=snake_case__ ).to(sample.device ) lowercase__ : str= sigma + gamma * sigma lowercase__ : Any= sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = True , ): '''simple docstring''' lowercase__ : Union[str, Any]= sample_hat + sigma_hat * model_output lowercase__ : Optional[int]= (sample_hat - pred_original_sample) / sigma_hat lowercase__ : Optional[Any]= sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=snake_case__ , derivative=snake_case__ , pred_original_sample=snake_case__ ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = True , ): '''simple docstring''' lowercase__ : int= sample_prev + sigma_prev * model_output lowercase__ : Optional[int]= (sample_prev - pred_original_sample) / sigma_prev lowercase__ : Optional[Any]= sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=snake_case__ , derivative=snake_case__ , pred_original_sample=snake_case__ ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' raise NotImplementedError()
85
0
# ---------------------------------------------------------------------------
# NOTE(review): dataset-dump row; identifiers were mechanically renamed and
# newlines collapsed.  Kept byte-for-byte; comments only record apparent
# intent.
#
# Apparent intent: Ray-distributed RAG retrieval — presumably
# examples/research_projects/rag/distributed_ray_retriever.py from
# transformers; confirm against upstream.  Two classes:
#   * a retrieval worker (intended as a Ray remote actor) that lazily builds
#     a RagRetriever, initializes its index, and exposes a retrieve() that
#     forwards to RagRetriever._main_retrieve;
#   * a RagRetriever subclass that fans retrieval out to a list of such
#     workers, picking one at random per call, falling back to in-process
#     retrieval when no workers were supplied.
# Corruption: `lowercase__ = ...` assignments were presumably `self.<attr>`
# (initialized flag, retriever, retrieval_workers) and string literals such
# as 'custom' keep their original quoting; verify before restoring.
# ---------------------------------------------------------------------------
"""simple docstring""" import logging import random import ray from transformers import RagConfig, RagRetriever, RagTokenizer from transformers.models.rag.retrieval_rag import CustomHFIndex a : Optional[int] = logging.getLogger(__name__) class __UpperCAmelCase: """simple docstring""" def __init__( self ): '''simple docstring''' lowercase__ : Optional[int]= False def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' if not self.initialized: lowercase__ : List[str]= RagRetriever( _A , question_encoder_tokenizer=_A , generator_tokenizer=_A , index=_A , init_retrieval=_A , ) lowercase__ : List[str]= True def UpperCAmelCase_ ( self ): '''simple docstring''' self.retriever.index.init_index() def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ): '''simple docstring''' lowercase__ : str= self.retriever._main_retrieve(_A , _A ) return doc_ids, retrieved_doc_embeds class __UpperCAmelCase( snake_case__ ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__=None ): '''simple docstring''' if index is not None and index.is_initialized() and len(_A ) > 0: raise ValueError( "When using Ray for distributed fine-tuning, " "you\'ll need to provide the paths instead, " "as the dataset and the index are loaded " "separately. More info in examples/rag/use_own_knowledge_dataset.py " ) super().__init__( _A , question_encoder_tokenizer=_A , generator_tokenizer=_A , index=_A , init_retrieval=_A , ) lowercase__ : int= retrieval_workers if len(self.retrieval_workers ) > 0: ray.get( [ worker.create_rag_retriever.remote(_A , _A , _A , _A ) for worker in self.retrieval_workers ] ) def UpperCAmelCase_ ( self ): '''simple docstring''' logger.info("initializing retrieval" ) if len(self.retrieval_workers ) > 0: ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] ) else: # Non-distributed training. Load index into this same process.
# (below: random-worker retrieve, classmethod tokenizer accessor, and a
#  from_pretrained-style constructor that wires config, tokenizers and an
#  optional CustomHFIndex)
self.index.init_index() def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ): '''simple docstring''' if len(self.retrieval_workers ) > 0: # Select a random retrieval actor. lowercase__ : List[str]= self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )] lowercase__ : List[str]= ray.get(random_worker.retrieve.remote(_A , _A ) ) else: lowercase__ : Optional[int]= self._main_retrieve(_A , _A ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(_A ) @classmethod def UpperCAmelCase_ ( cls , snake_case__ , snake_case__=None , **snake_case__ ): '''simple docstring''' return super(_A , cls ).get_tokenizers(_A , _A , **_A ) @classmethod def UpperCAmelCase_ ( cls , snake_case__ , snake_case__ , snake_case__=None , **snake_case__ ): '''simple docstring''' lowercase__ : List[str]= kwargs.pop("config" , _A ) or RagConfig.from_pretrained(_A , **_A ) lowercase__ : str= RagTokenizer.from_pretrained(_A , config=_A ) lowercase__ : Any= rag_tokenizer.question_encoder lowercase__ : List[Any]= rag_tokenizer.generator if indexed_dataset is not None: lowercase__ : str= 'custom' lowercase__ : Dict= CustomHFIndex(config.retrieval_vector_size , _A ) else: lowercase__ : Any= cls._build_index(_A ) return cls( _A , question_encoder_tokenizer=_A , generator_tokenizer=_A , retrieval_workers=_A , index=_A , )
711
"""Multimodal configuration wrapper (upstream: transformers MMBTConfig)."""

from ....utils import logging


a : List[str] = logging.get_logger(__name__)


class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
    """Wraps a text-model config and adds multimodal fields.

    All attributes of the wrapped ``config`` are exposed on this object; a
    ``modal_hidden_size`` for the non-text encoder is recorded, and
    ``num_labels`` may optionally be overridden for classification heads.
    """

    def __init__( self , config , num_labels=None , modal_hidden_size=2048 ):
        """
        Args:
            config: base transformer config whose attributes are adopted wholesale.
            num_labels: optional number of classification labels; only applied
                when truthy (matches upstream MMBTConfig, where 0/None mean
                "keep the base config's value").
            modal_hidden_size: hidden size of the modal (e.g. image) encoder.
        """
        # Fix: the original used the same parameter name three times (a
        # SyntaxError) and assigned every value to a throwaway local, so the
        # instance was never populated.  Restore the upstream behaviour:
        # share the wrapped config's attribute dict, then layer the
        # multimodal fields on top.
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
85
0
# ---------------------------------------------------------------------------
# NOTE(review): dataset-dump row; identifiers were mechanically renamed and
# newlines collapsed, so statements are fused per physical line.  Kept
# byte-for-byte (including the large token-id fixtures); comments only
# record the apparent intent.
#
# Apparent intent: MBart-50 tokenizer tests — presumably
# tests/models/mbart50/test_tokenization_mbart50.py from transformers;
# confirm against upstream.  Two test classes:
#   * a TokenizerTesterMixin-based unit-test class: SentencePiece fixture
#     round-trip, vocab size/keys, full tokenizer output on sample
#     sentences (the large `expected_encoding` dict is a recorded fixture
#     for facebook/mbart-large-50), and slow/fast save_pretrained parity in
#     both legacy and tokenizer.json formats;
#   * an en_XX→ro_RO integration-test class against
#     facebook/mbart-large-50-one-to-many-mmt: language-code ids, decoding
#     with/without special tokens, truncation, vocab save/reload, batch
#     fairseq parity, seq2seq batching shapes, and
#     _build_translation_inputs.
# ---------------------------------------------------------------------------
"""simple docstring""" import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from ...test_tokenization_common import TokenizerTesterMixin a : Any = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right a : Tuple = 250004 a : Any = 250020 @require_sentencepiece @require_tokenizers class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ , unittest.TestCase ): """simple docstring""" __lowerCamelCase = MBartaaTokenizer __lowerCamelCase = MBartaaTokenizerFast __lowerCamelCase = True __lowerCamelCase = True def UpperCAmelCase_ ( self ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing lowercase__ : Dict= MBartaaTokenizer(__a , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=__a ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : int= "<s>" lowercase__ : str= 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Union[str, Any]= list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-1] , "<mask>" ) self.assertEqual(len(__a ) , 1054 ) def UpperCAmelCase_ ( self ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1054 ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= MBartaaTokenizer(__a , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=__a ) lowercase__ : Dict= tokenizer.tokenize("This is a test" ) self.assertListEqual(__a ,
["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__a ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) lowercase__ : Tuple= tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( __a , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , ) lowercase__ : Optional[Any]= tokenizer.convert_tokens_to_ids(__a ) self.assertListEqual( __a , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) lowercase__ : Dict= tokenizer.convert_ids_to_tokens(__a ) self.assertListEqual( __a , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , ) @slow def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= {"input_ids": [[250004, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [250004, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297,
7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250004, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__a , model_name="facebook/mbart-large-50" , revision="d3913889c59cd5c9e456b269c376325eabad57e2" , ) def UpperCAmelCase_ ( self ): '''simple docstring''' if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast
versions return lowercase__ : Optional[Any]= (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowercase__ : str= self.rust_tokenizer_class.from_pretrained(__a , **__a ) lowercase__ : List[Any]= self.tokenizer_class.from_pretrained(__a , **__a ) lowercase__ : List[Any]= tempfile.mkdtemp() lowercase__ : List[Any]= tokenizer_r.save_pretrained(__a ) lowercase__ : Union[str, Any]= tokenizer_p.save_pretrained(__a ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) ) lowercase__ : List[Any]= tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f ) self.assertSequenceEqual(__a , __a ) # Checks everything loads correctly in the same way lowercase__ : int= tokenizer_r.from_pretrained(__a ) lowercase__ : int= tokenizer_p.from_pretrained(__a ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__a , __a ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(__a ) # Save tokenizer rust, legacy_format=True lowercase__ : List[Any]= tempfile.mkdtemp() lowercase__ : int= tokenizer_r.save_pretrained(__a , legacy_format=__a ) lowercase__ : List[Any]= tokenizer_p.save_pretrained(__a ) # Checks it save with the same files self.assertSequenceEqual(__a , __a ) # Checks everything loads correctly in the same way lowercase__ : List[Any]= tokenizer_r.from_pretrained(__a ) lowercase__ : Dict= tokenizer_p.from_pretrained(__a ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__a , __a ) ) shutil.rmtree(__a ) # Save tokenizer rust,
legacy_format=False lowercase__ : int= tempfile.mkdtemp() lowercase__ : Any= tokenizer_r.save_pretrained(__a , legacy_format=__a ) lowercase__ : Optional[Any]= tokenizer_p.save_pretrained(__a ) # Checks it saved the tokenizer.json file self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way lowercase__ : int= tokenizer_r.from_pretrained(__a ) lowercase__ : Dict= tokenizer_p.from_pretrained(__a ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__a , __a ) ) shutil.rmtree(__a ) @require_torch @require_sentencepiece @require_tokenizers class __UpperCAmelCase( unittest.TestCase ): """simple docstring""" __lowerCamelCase = "facebook/mbart-large-50-one-to-many-mmt" __lowerCamelCase = [ " UN Chief Says There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.", ] __lowerCamelCase = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] __lowerCamelCase = [EN_CODE, 8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2] @classmethod def UpperCAmelCase_ ( cls ): '''simple docstring''' lowercase__ : Tuple= MBartaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO" ) lowercase__ : str= 1 return cls def UpperCAmelCase_ ( self ): '''simple docstring''' self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 250004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 250020 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"] , 250038 ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[Any]= self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , __a ) def UpperCAmelCase_ ( self ): '''simple docstring''' self.assertIn(__a , self.tokenizer.all_special_ids ) lowercase__ : Any= [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2] lowercase__ : List[Any]= self.tokenizer.decode(__a , skip_special_tokens=__a ) lowercase__ : Any= self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__a ) self.assertEqual(__a , __a ) self.assertNotIn(self.tokenizer.eos_token , __a ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Tuple= ["this is gunna be a long sentence " * 20] assert isinstance(src_text[0] , __a ) lowercase__ : List[Any]= 10 lowercase__ : Optional[int]= self.tokenizer(__a , max_length=__a , truncation=__a ).input_ids[0] self.assertEqual(ids[0] , __a ) self.assertEqual(ids[-1] , 2 ) self.assertEqual(len(__a ) , __a ) def UpperCAmelCase_ ( self ): '''simple docstring''' self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [250053, 250001] ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : int= tempfile.mkdtemp() lowercase__ : Tuple= self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(__a ) lowercase__ : Any= MBartaaTokenizer.from_pretrained(__a ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __a ) @require_torch def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[int]= self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__a , return_tensors="pt" ) lowercase__ : str= shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id ) # fairseq batch:
https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == RO_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE] @require_torch def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[str]= self.tokenizer( self.src_text , text_target=self.tgt_text , padding=__a , truncation=__a , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , ) lowercase__ : Dict= shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id ) self.assertIsInstance(__a , __a ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) lowercase__ : Union[str, Any]= batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , __a ) self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[str]= self.tokenizer(self.src_text , padding=__a , truncation=__a , max_length=3 , return_tensors="pt" ) lowercase__ : List[Any]= self.tokenizer( text_target=self.tgt_text , padding=__a , truncation=__a , max_length=10 , return_tensors="pt" ) lowercase__ : List[Any]= targets["input_ids"] lowercase__ : Any= shift_tokens_right(__a , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : int= self.tokenizer._build_translation_inputs( "A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR" ) self.assertEqual( nested_simplify(__a ) , { # en_XX, A, test, EOS "input_ids": [[250004, 62, 3034, 2]], "attention_mask": [[1, 1, 1, 1]],
# ar_AR "forced_bos_token_id": 250001, } , )
712
# ---------------------------------------------------------------------------
# NOTE(review): dataset-dump row; identifiers were mechanically renamed and
# newlines collapsed.  Kept byte-for-byte; comments only record apparent
# intent.
#
# Apparent intent: a CvT checkpoint-conversion script — presumably
# transformers' convert_cvt_original_pytorch_checkpoint_to_pytorch.py;
# confirm against upstream.  The two helper functions visible here build
# (hf_name, original_name) rename pairs:
#   * embeddings(idx): patch-embedding projection/norm weights for stage idx;
#   * attention(idx, cnt): per-block attention rename pairs for stage idx /
#     block cnt — conv projections for q/k/v (conv + batch-norm stats),
#     linear q/k/v projections, output dense, MLP fc1/fc2, and the two
#     layernorms.
# The conversion entry point that consumes these lists starts at the very
# end of this chunk and continues beyond it; it is intentionally left
# untouched here.
# ---------------------------------------------------------------------------
"""simple docstring""" import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def lowercase__(A ) ->int: """simple docstring""" lowercase__ : Optional[int]= [] embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''', f'''stage{idx}.patch_embed.proj.weight''', ) ) embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''', f'''stage{idx}.patch_embed.proj.bias''', ) ) embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''', f'''stage{idx}.patch_embed.norm.weight''', ) ) embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''', f'''stage{idx}.patch_embed.norm.bias''', ) ) return embed def lowercase__(A , A ) ->Any: """simple docstring""" lowercase__ : Any= [] attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''', ) ) attention_weights.append( (
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''', ) ) attention_weights.append( (
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''', ) ) attention_weights.append( (
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj.bias''', ) ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', f'''stage{idx}.blocks.{cnt}.norm1.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', f'''stage{idx}.blocks.{cnt}.norm1.bias''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', f'''stage{idx}.blocks.{cnt}.norm2.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', f'''stage{idx}.blocks.{cnt}.norm2.bias''') ) return attention_weights def lowercase__(A ) ->List[Any]: """simple
docstring""" lowercase__ : Dict= [] token.append((f'''cvt.encoder.stages.{idx}.cls_token''', "stage2.cls_token") ) return token def lowercase__() ->Union[str, Any]: """simple docstring""" lowercase__ : Dict= [] head.append(("layernorm.weight", "norm.weight") ) head.append(("layernorm.bias", "norm.bias") ) head.append(("classifier.weight", "head.weight") ) head.append(("classifier.bias", "head.bias") ) return head def lowercase__(A , A , A , A ) ->Optional[int]: """simple docstring""" lowercase__ : List[str]= "imagenet-1k-id2label.json" lowercase__ : List[str]= 1_000 lowercase__ : Tuple= "huggingface/label-files" lowercase__ : int= num_labels lowercase__ : int= json.load(open(cached_download(hf_hub_url(A , A , repo_type="dataset" ) ) , "r" ) ) lowercase__ : str= {int(A ): v for k, v in idalabel.items()} lowercase__ : Optional[int]= idalabel lowercase__ : Union[str, Any]= {v: k for k, v in idalabel.items()} lowercase__ : Tuple= CvtConfig(num_labels=A , idalabel=A , labelaid=A ) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit("/" , 1 )[-1][4:6] == "13": lowercase__ : int= [1, 2, 10] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit("/" , 1 )[-1][4:6] == "21": lowercase__ : Union[str, Any]= [1, 4, 16] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: lowercase__ : Optional[Any]= [2, 2, 20] lowercase__ : Optional[Any]= [3, 12, 16] lowercase__ : List[str]= [192, 768, 1_024] lowercase__ : List[str]= CvtForImageClassification(A ) lowercase__ : Any= AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" ) lowercase__ : Dict= image_size lowercase__ : int= torch.load(A , map_location=torch.device("cpu" ) ) lowercase__ : Optional[Any]= OrderedDict() lowercase__ : Tuple= [] for idx in range(len(config.depth ) ): if config.cls_token[idx]: lowercase__ : Optional[int]= list_of_state_dict + cls_token(A ) lowercase__ : List[str]= list_of_state_dict + embeddings(A ) for cnt in range(config.depth[idx] ): lowercase__ : Dict= 
list_of_state_dict + attention(A , A ) lowercase__ : Optional[Any]= list_of_state_dict + final() for gg in list_of_state_dict: print(A ) for i in range(len(A ) ): lowercase__ : str= original_weights[list_of_state_dict[i][1]] model.load_state_dict(A ) model.save_pretrained(A ) image_processor.save_pretrained(A ) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": a : Optional[int] = argparse.ArgumentParser() parser.add_argument( """--cvt_model""", default="""cvt-w24""", type=str, help="""Name of the cvt model you'd like to convert.""", ) parser.add_argument( """--image_size""", default=384, type=int, help="""Input Image Size""", ) parser.add_argument( """--cvt_file_name""", default=r"""cvtmodels\CvT-w24-384x384-IN-22k.pth""", type=str, help="""Input Image Size""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) a : Optional[int] = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
85
0
"""simple docstring""" import warnings from ...utils import logging from .image_processing_glpn import GLPNImageProcessor a : str = logging.get_logger(__name__) class __UpperCAmelCase( lowercase_ ): """simple docstring""" def __init__( self , *snake_case__ , **snake_case__ ): '''simple docstring''' warnings.warn( "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use GLPNImageProcessor instead." , lowerCamelCase_ , ) super().__init__(*lowerCamelCase_ , **lowerCamelCase_ )
713
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import ScoreSdeVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = 42 __lowerCamelCase = 42 def __init__( self , snake_case__ , snake_case__ ): '''simple docstring''' super().__init__() self.register_modules(unet=snake_case__ , scheduler=snake_case__ ) @torch.no_grad() def __call__( self , snake_case__ = 1 , snake_case__ = 2000 , snake_case__ = None , snake_case__ = "pil" , snake_case__ = True , **snake_case__ , ): '''simple docstring''' lowercase__ : Optional[Any]= self.unet.config.sample_size lowercase__ : Dict= (batch_size, 3, img_size, img_size) lowercase__ : List[Any]= self.unet lowercase__ : Tuple= randn_tensor(snake_case__ , generator=snake_case__ ) * self.scheduler.init_noise_sigma lowercase__ : Tuple= sample.to(self.device ) self.scheduler.set_timesteps(snake_case__ ) self.scheduler.set_sigmas(snake_case__ ) for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): lowercase__ : Optional[Any]= self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device ) # correction step for _ in range(self.scheduler.config.correct_steps ): lowercase__ : List[Any]= self.unet(snake_case__ , snake_case__ ).sample lowercase__ : List[Any]= self.scheduler.step_correct(snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample # prediction step lowercase__ : List[str]= model(snake_case__ , snake_case__ ).sample lowercase__ : Tuple= self.scheduler.step_pred(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ ) lowercase__, lowercase__ : Tuple= output.prev_sample, output.prev_sample_mean lowercase__ : List[str]= sample_mean.clamp(0 , 1 ) lowercase__ : Union[str, Any]= sample.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowercase__ : 
str= self.numpy_to_pil(snake_case__ ) if not return_dict: return (sample,) return ImagePipelineOutput(images=snake_case__ )
85
0
from __future__ import annotations


def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Swap ``array[index1]`` and ``array[index2]`` if they violate ``direction``.

    ``direction == 1`` enforces ascending order, ``direction == 0`` descending.
    Mutates ``array`` in place.
    """
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        # Mangled source assigned the pair to throwaway names instead of swapping.
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Merge the bitonic sequence ``array[low:low+length]`` into sorted order.

    ``length`` must be a power of two. Mutates ``array`` in place.
    """
    if length > 1:
        middle = length // 2
        # Compare-and-swap each element with its counterpart in the other half.
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Bitonic-sort ``array[low:low+length]`` in place.

    Args:
        array: list to sort; ``length`` must be a power of two.
        low: start index of the slice to sort.
        length: number of elements to sort.
        direction: 1 for ascending, 0 for descending.
    """
    if length > 1:
        middle = length // 2
        # Build a bitonic sequence: first half ascending, second half descending.
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print("\nSorted array in ascending order is: ", end="")
    print(*unsorted, sep=", ")

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print("Sorted array in descending order is: ", end="")
    print(*unsorted, sep=", ")
714
"""simple docstring""" def lowercase__(A ) ->list[int]: """simple docstring""" lowercase__ : List[str]= len(A ) for i in range(A ): for j in range(i + 1 , A ): if numbers[j] < numbers[i]: lowercase__, lowercase__ : List[str]= numbers[j], numbers[i] return numbers if __name__ == "__main__": a : Dict = input("""Enter numbers separated by a comma:\n""").strip() a : List[str] = [int(item) for item in user_input.split(""",""")] print(exchange_sort(unsorted))
85
0