Dataset schema:

    column                   | type           | min | max
    -------------------------+----------------+-----+-------
    code                     | string (length)| 86  | 54.5k
    code_codestyle           | int64          | 0   | 371
    style_context            | string (length)| 87  | 49.2k
    style_context_codestyle  | int64          | 0   | 349
    label                    | int64          | 0   | 1


Row 1
code:
'''simple docstring''' import argparse import os from . import ( ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BART_PRETRAINED_MODEL_ARCHIVE_LIST, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, BartConfig, BertConfig, CamembertConfig, CTRLConfig, DistilBertConfig, DPRConfig, ElectraConfig, FlaubertConfig, GPTaConfig, LayoutLMConfig, LxmertConfig, OpenAIGPTConfig, RobertaConfig, TaConfig, TFAlbertForPreTraining, TFBartForConditionalGeneration, TFBartForSequenceClassification, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFCamembertForMaskedLM, TFCTRLLMHeadModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, TFElectraForPreTraining, TFFlaubertWithLMHeadModel, TFGPTaLMHeadModel, TFLayoutLMForMaskedLM, TFLxmertForPreTraining, TFLxmertVisualFeatureEncoder, TFOpenAIGPTLMHeadModel, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForSequenceClassification, TFTaForConditionalGeneration, TFTransfoXLLMHeadModel, TFWavaVecaModel, TFXLMRobertaForMaskedLM, TFXLMWithLMHeadModel, TFXLNetLMHeadModel, TransfoXLConfig, WavaVecaConfig, WavaVecaModel, XLMConfig, XLMRobertaConfig, XLNetConfig, is_torch_available, load_pytorch_checkpoint_in_tfa_model, ) from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging if is_torch_available(): import numpy as np import torch from . 
import ( AlbertForPreTraining, BartForConditionalGeneration, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, CamembertForMaskedLM, CTRLLMHeadModel, DistilBertForMaskedLM, DistilBertForQuestionAnswering, DPRContextEncoder, DPRQuestionEncoder, DPRReader, ElectraForPreTraining, FlaubertWithLMHeadModel, GPTaLMHeadModel, LayoutLMForMaskedLM, LxmertForPreTraining, LxmertVisualFeatureEncoder, OpenAIGPTLMHeadModel, RobertaForMaskedLM, RobertaForSequenceClassification, TaForConditionalGeneration, TransfoXLLMHeadModel, XLMRobertaForMaskedLM, XLMWithLMHeadModel, XLNetLMHeadModel, ) logging.set_verbosity_info() __A : int = { 'bart': ( BartConfig, TFBartForConditionalGeneration, TFBartForSequenceClassification, BartForConditionalGeneration, BART_PRETRAINED_MODEL_ARCHIVE_LIST, ), 'bert': ( BertConfig, TFBertForPreTraining, BertForPreTraining, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'bert-base-cased-finetuned-mrpc': ( BertConfig, TFBertForSequenceClassification, BertForSequenceClassification, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'dpr': ( DPRConfig, TFDPRQuestionEncoder, TFDPRContextEncoder, TFDPRReader, DPRQuestionEncoder, DPRContextEncoder, DPRReader, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ), 'gpt2': ( GPTaConfig, TFGPTaLMHeadModel, GPTaLMHeadModel, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'xlnet': ( XLNetConfig, TFXLNetLMHeadModel, XLNetLMHeadModel, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'xlm': ( XLMConfig, TFXLMWithLMHeadModel, XLMWithLMHeadModel, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'xlm-roberta': ( XLMRobertaConfig, TFXLMRobertaForMaskedLM, XLMRobertaForMaskedLM, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'transfo-xl': ( TransfoXLConfig, TFTransfoXLLMHeadModel, TransfoXLLMHeadModel, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'openai-gpt': ( OpenAIGPTConfig, TFOpenAIGPTLMHeadModel, OpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'roberta': ( RobertaConfig, TFRobertaForCausalLM, TFRobertaForMaskedLM, RobertaForMaskedLM, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'layoutlm': ( LayoutLMConfig, TFLayoutLMForMaskedLM, LayoutLMForMaskedLM, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, ), 'roberta-large-mnli': ( RobertaConfig, TFRobertaForSequenceClassification, RobertaForSequenceClassification, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'camembert': ( CamembertConfig, TFCamembertForMaskedLM, CamembertForMaskedLM, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'flaubert': ( FlaubertConfig, TFFlaubertWithLMHeadModel, FlaubertWithLMHeadModel, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'distilbert': ( DistilBertConfig, TFDistilBertForMaskedLM, DistilBertForMaskedLM, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'distilbert-base-distilled-squad': ( DistilBertConfig, TFDistilBertForQuestionAnswering, DistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'lxmert': ( LxmertConfig, TFLxmertForPreTraining, LxmertForPreTraining, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'lxmert-visual-feature-encoder': ( LxmertConfig, TFLxmertVisualFeatureEncoder, LxmertVisualFeatureEncoder, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'ctrl': ( 
CTRLConfig, TFCTRLLMHeadModel, CTRLLMHeadModel, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'albert': ( AlbertConfig, TFAlbertForPreTraining, AlbertForPreTraining, ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 't5': ( TaConfig, TFTaForConditionalGeneration, TaForConditionalGeneration, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'electra': ( ElectraConfig, TFElectraForPreTraining, ElectraForPreTraining, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'wav2vec2': ( WavaVecaConfig, TFWavaVecaModel, WavaVecaModel, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), } def UpperCamelCase_ ( A__ : Tuple , A__ : Any , A__ : Optional[int] , A__ : Optional[int] , A__ : Dict=False , A__ : List[Any]=True ): '''simple docstring''' if model_type not in MODEL_CLASSES: raise ValueError(f'Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.' ) lowerCAmelCase_ : Tuple = MODEL_CLASSES[model_type] # Initialise TF model if config_file in aws_config_map: lowerCAmelCase_ : List[str] = cached_file(UpperCAmelCase_ , UpperCAmelCase_ , force_download=not use_cached_models ) lowerCAmelCase_ : Optional[Any] = config_class.from_json_file(UpperCAmelCase_ ) lowerCAmelCase_ : List[Any] = True lowerCAmelCase_ : List[str] = True print(f'Building TensorFlow model from configuration: {config}' ) lowerCAmelCase_ : Optional[int] = model_class(UpperCAmelCase_ ) # Load weights from tf checkpoint if pytorch_checkpoint_path in aws_config_map.keys(): lowerCAmelCase_ : str = cached_file( UpperCAmelCase_ , UpperCAmelCase_ , force_download=not use_cached_models ) # Load PyTorch checkpoint in tf2 model: lowerCAmelCase_ : Any = load_pytorch_checkpoint_in_tfa_model(UpperCAmelCase_ , UpperCAmelCase_ ) if compare_with_pt_model: lowerCAmelCase_ : Optional[int] = tf_model(tf_model.dummy_inputs , training=UpperCAmelCase_ ) # build the network lowerCAmelCase_ : List[Any] = torch.load(UpperCAmelCase_ , map_location="""cpu""" ) lowerCAmelCase_ : List[str] = pt_model_class.from_pretrained( pretrained_model_name_or_path=UpperCAmelCase_ , config=UpperCAmelCase_ , state_dict=UpperCAmelCase_ ) with torch.no_grad(): lowerCAmelCase_ : Tuple = pt_model(**pt_model.dummy_inputs ) lowerCAmelCase_ : int = pto[0].numpy() lowerCAmelCase_ : Optional[Any] = tfo[0].numpy() lowerCAmelCase_ : Dict = np.amax(np.abs(np_pt - np_tf ) ) print(f'Max absolute difference between models outputs {diff}' ) assert diff <= 2E-2, f'Error, model absolute difference is >2e-2: {diff}' # Save pytorch-model print(f'Save TensorFlow model to {tf_dump_path}' ) tf_model.save_weights(UpperCAmelCase_ , save_format="""h5""" ) def UpperCamelCase_ ( A__ : Optional[Any] , A__ : Optional[Any] , A__ : List[Any]=None , A__ : Any=None , A__ : str=False , A__ : int=False , A__ : Any=False , A__ : Optional[int]=False , ): '''simple docstring''' if args_model_type is None: lowerCAmelCase_ : Optional[Any] = list(MODEL_CLASSES.keys() ) else: lowerCAmelCase_ : Optional[int] = [args_model_type] for j, model_type in enumerate(UpperCAmelCase_ , start=1 ): print("""=""" * 1_00 ) print(f' Converting model type {j}/{len(UpperCAmelCase_ )}: {model_type}' ) print("""=""" * 1_00 ) if model_type not in MODEL_CLASSES: raise ValueError(f'Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.' 
) lowerCAmelCase_ : int = MODEL_CLASSES[model_type] if model_shortcut_names_or_path is None: lowerCAmelCase_ : Union[str, Any] = list(aws_model_maps.keys() ) if config_shortcut_names_or_path is None: lowerCAmelCase_ : str = model_shortcut_names_or_path for i, (model_shortcut_name, config_shortcut_name) in enumerate( zip(UpperCAmelCase_ , UpperCAmelCase_ ) , start=1 ): print("""-""" * 1_00 ) if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name: if not only_convert_finetuned_models: print(f' Skipping finetuned checkpoint {model_shortcut_name}' ) continue lowerCAmelCase_ : Union[str, Any] = model_shortcut_name elif only_convert_finetuned_models: print(f' Skipping not finetuned checkpoint {model_shortcut_name}' ) continue print( f' Converting checkpoint {i}/{len(UpperCAmelCase_ )}: {model_shortcut_name} - model_type {model_type}' ) print("""-""" * 1_00 ) if config_shortcut_name in aws_config_map: lowerCAmelCase_ : Tuple = cached_file(UpperCAmelCase_ , UpperCAmelCase_ , force_download=not use_cached_models ) else: lowerCAmelCase_ : List[Any] = config_shortcut_name if model_shortcut_name in aws_model_maps: lowerCAmelCase_ : Union[str, Any] = cached_file(UpperCAmelCase_ , UpperCAmelCase_ , force_download=not use_cached_models ) else: lowerCAmelCase_ : int = model_shortcut_name if os.path.isfile(UpperCAmelCase_ ): lowerCAmelCase_ : Dict = 'converted_model' convert_pt_checkpoint_to_tf( model_type=UpperCAmelCase_ , pytorch_checkpoint_path=UpperCAmelCase_ , config_file=UpperCAmelCase_ , tf_dump_path=os.path.join(UpperCAmelCase_ , model_shortcut_name + """-tf_model.h5""" ) , compare_with_pt_model=UpperCAmelCase_ , ) if remove_cached_files: os.remove(UpperCAmelCase_ ) os.remove(UpperCAmelCase_ ) if __name__ == "__main__": __A : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file." ) parser.add_argument( "--model_type", default=None, type=str, help=( F'''Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and ''' "convert all the models from AWS." ), ) parser.add_argument( "--pytorch_checkpoint_path", default=None, type=str, help=( "Path to the PyTorch checkpoint path or shortcut name to download from AWS. " "If not given, will download and convert all the checkpoints from AWS." ), ) parser.add_argument( "--config_file", default=None, type=str, help=( "The config json file corresponding to the pre-trained model. \n" "This specifies the model architecture. If not given and " "--pytorch_checkpoint_path is not given or is a shortcut name " "use the configuration associated to the shortcut name on the AWS" ), ) parser.add_argument( "--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions." 
) parser.add_argument( "--use_cached_models", action="store_true", help="Use cached models if possible instead of updating to latest checkpoint versions.", ) parser.add_argument( "--remove_cached_files", action="store_true", help="Remove pytorch models after conversion (save memory when converting in batches).", ) parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.") __A : Tuple = parser.parse_args() # if args.pytorch_checkpoint_path is not None: # convert_pt_checkpoint_to_tf(args.model_type.lower(), # args.pytorch_checkpoint_path, # args.config_file if args.config_file is not None else args.pytorch_checkpoint_path, # args.tf_dump_path, # compare_with_pt_model=args.compare_with_pt_model, # use_cached_models=args.use_cached_models) # else: convert_all_pt_checkpoints_to_tf( args.model_type.lower() if args.model_type is not None else None, args.tf_dump_path, model_shortcut_names_or_path=[args.pytorch_checkpoint_path] if args.pytorch_checkpoint_path is not None else None, config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None, compare_with_pt_model=args.compare_with_pt_model, use_cached_models=args.use_cached_models, remove_cached_files=args.remove_cached_files, only_convert_finetuned_models=args.only_convert_finetuned_models, )
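A hedged usage sketch for the conversion entry point above: convert_all_pt_checkpoints_to_tf is the name used in the script's __main__ block, but the shortcut names and paths below are placeholders, not values taken from this row.

# Hypothetical invocation; shortcut names and dump path are placeholders.
convert_all_pt_checkpoints_to_tf(
    "bert",                                        # model_type, as in args.model_type.lower()
    "./tf_dump",                                   # tf_dump_path
    model_shortcut_names_or_path=["bert-base-uncased"],
    config_shortcut_names_or_path=["bert-base-uncased"],
    compare_with_pt_model=True,
    use_cached_models=True,
)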
code_codestyle: 120

style_context:
'''simple docstring''' import argparse import logging import pickle import random import time import numpy as np from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO ) snake_case_ : str = logging.getLogger(__name__) def A__ ( ): _UpperCamelCase : List[Any] = argparse.ArgumentParser( description='Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).' ) parser.add_argument('--file_path' , type=UpperCAmelCase_ , default='data/dump.txt' , help='The path to the data.' ) parser.add_argument('--tokenizer_type' , type=UpperCAmelCase_ , default='bert' , choices=['bert', 'roberta', 'gpt2'] ) parser.add_argument('--tokenizer_name' , type=UpperCAmelCase_ , default='bert-base-uncased' , help='The tokenizer to use.' ) parser.add_argument('--dump_file' , type=UpperCAmelCase_ , default='data/dump' , help='The dump file prefix.' ) _UpperCamelCase : Any = parser.parse_args() logger.info(f'Loading Tokenizer ({args.tokenizer_name})' ) if args.tokenizer_type == "bert": _UpperCamelCase : Optional[int] = BertTokenizer.from_pretrained(args.tokenizer_name ) _UpperCamelCase : Optional[int] = tokenizer.special_tokens_map['cls_token'] # `[CLS]` _UpperCamelCase : Dict = tokenizer.special_tokens_map['sep_token'] # `[SEP]` elif args.tokenizer_type == "roberta": _UpperCamelCase : List[Any] = RobertaTokenizer.from_pretrained(args.tokenizer_name ) _UpperCamelCase : Any = tokenizer.special_tokens_map['cls_token'] # `<s>` _UpperCamelCase : int = tokenizer.special_tokens_map['sep_token'] # `</s>` elif args.tokenizer_type == "gpt2": _UpperCamelCase : Optional[int] = GPTaTokenizer.from_pretrained(args.tokenizer_name ) _UpperCamelCase : Optional[Any] = tokenizer.special_tokens_map['bos_token'] # `<|endoftext|>` _UpperCamelCase : Any = tokenizer.special_tokens_map['eos_token'] # `<|endoftext|>` logger.info(f'Loading text from {args.file_path}' ) with open(args.file_path , 'r' , encoding='utf8' ) as fp: _UpperCamelCase : List[Any] = fp.readlines() logger.info('Start encoding' ) logger.info(f'{len(UpperCAmelCase_ )} examples to process.' ) _UpperCamelCase : int = [] _UpperCamelCase : Any = 0 _UpperCamelCase : Any = 1_0_0_0_0 _UpperCamelCase : Optional[Any] = time.time() for text in data: _UpperCamelCase : List[Any] = f'{bos} {text.strip()} {sep}' _UpperCamelCase : Any = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) rslt.append(UpperCAmelCase_ ) iter += 1 if iter % interval == 0: _UpperCamelCase : Union[str, Any] = time.time() logger.info(f'{iter} examples processed. - {(end-start):.2f}s/{interval}expl' ) _UpperCamelCase : Tuple = time.time() logger.info('Finished binarization' ) logger.info(f'{len(UpperCAmelCase_ )} examples processed.' ) _UpperCamelCase : Optional[int] = f'{args.dump_file}.{args.tokenizer_name}.pickle' _UpperCamelCase : List[str] = tokenizer.vocab_size if vocab_size < (1 << 1_6): _UpperCamelCase : List[Any] = [np.uintaa(UpperCAmelCase_ ) for d in rslt] else: _UpperCamelCase : Any = [np.intaa(UpperCAmelCase_ ) for d in rslt] random.shuffle(rslt_ ) logger.info(f'Dump to {dp_file}' ) with open(UpperCAmelCase_ , 'wb' ) as handle: pickle.dump(rslt_ , UpperCAmelCase_ , protocol=pickle.HIGHEST_PROTOCOL ) if __name__ == "__main__": main()
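One step in the script above is worth spelling out: token ids are stored as uint16 whenever the vocabulary fits in 16 bits, halving the pickle size (the obfuscated np.uintaa / np.intaa read as np.uint16 / np.int32). A minimal sketch of that dtype rule; the vocab size below is illustrative only:

import numpy as np

vocab_size = 30_522  # illustrative; any vocab under 2**16 takes the uint16 branch
token_ids = [101, 7592, 2088, 102]
dtype = np.uint16 if vocab_size < (1 << 16) else np.int32
encoded = np.array(token_ids, dtype=dtype)
assert encoded.nbytes == 2 * len(token_ids)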
style_context_codestyle: 83
label: 0


Row 2
code:
"""simple docstring""" from __future__ import annotations def _snake_case ( _snake_case : list[int] ) -> int: '''simple docstring''' _A = len(_snake_case ) // 2 # choose the middle 3 elements _A = lst[m - 1 : m + 2] # if middle element is peak if three[1] > three[0] and three[1] > three[2]: return three[1] # if increasing, recurse on right elif three[0] < three[2]: if len(lst[:m] ) == 2: m -= 1 return peak(lst[m:] ) # decreasing else: if len(lst[:m] ) == 2: m += 1 return peak(lst[:m] ) if __name__ == "__main__": import doctest doctest.testmod()
code_codestyle: 271

style_context:
"""simple docstring""" import argparse import json from typing import List from ltp import LTP from transformers import BertTokenizer def _snake_case ( _snake_case : Dict ) -> Any: '''simple docstring''' if ( (cp >= 0X4e00 and cp <= 0X9fff) or (cp >= 0X3400 and cp <= 0X4dbf) # or (cp >= 0X2_0000 and cp <= 0X2_a6df) # or (cp >= 0X2_a700 and cp <= 0X2_b73f) # or (cp >= 0X2_b740 and cp <= 0X2_b81f) # or (cp >= 0X2_b820 and cp <= 0X2_ceaf) # or (cp >= 0Xf900 and cp <= 0Xfaff) or (cp >= 0X2_f800 and cp <= 0X2_fa1f) # ): # return True return False def _snake_case ( _snake_case : str ) -> Tuple: '''simple docstring''' for char in word: _A = ord(_snake_case ) if not _is_chinese_char(_snake_case ): return 0 return 1 def _snake_case ( _snake_case : List[str] ) -> Optional[Any]: '''simple docstring''' _A = set() for token in tokens: _A = len(_snake_case ) > 1 and is_chinese(_snake_case ) if chinese_word: word_set.add(_snake_case ) _A = list(_snake_case ) return word_list def _snake_case ( _snake_case : List[str] , _snake_case : set() ) -> Optional[Any]: '''simple docstring''' if not chinese_word_set: return bert_tokens _A = max([len(_snake_case ) for w in chinese_word_set] ) _A = bert_tokens _A , _A = 0, len(_snake_case ) while start < end: _A = True if is_chinese(bert_word[start] ): _A = min(end - start , _snake_case ) for i in range(_snake_case , 1 , -1 ): _A = ''.join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): _A = '##' + bert_word[j] _A = start + i _A = False break if single_word: start += 1 return bert_word def _snake_case ( _snake_case : List[str] , _snake_case : LTP , _snake_case : BertTokenizer ) -> str: '''simple docstring''' _A = [] for i in range(0 , len(_snake_case ) , 1_00 ): _A = ltp_tokenizer.seg(lines[i : i + 1_00] )[0] _A = [get_chinese_word(_snake_case ) for r in res] ltp_res.extend(_snake_case ) assert len(_snake_case ) == len(_snake_case ) _A = [] for i in range(0 , len(_snake_case ) , 1_00 ): _A = bert_tokenizer(lines[i : i + 1_00] , add_special_tokens=_snake_case , truncation=_snake_case , max_length=5_12 ) bert_res.extend(res['input_ids'] ) assert len(_snake_case ) == len(_snake_case ) _A = [] for input_ids, chinese_word in zip(_snake_case , _snake_case ): _A = [] for id in input_ids: _A = bert_tokenizer._convert_id_to_token(_snake_case ) input_tokens.append(_snake_case ) _A = add_sub_symbol(_snake_case , _snake_case ) _A = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(_snake_case ): if token[:2] == "##": _A = token[2:] # save chinese tokens' pos if len(_snake_case ) == 1 and _is_chinese_char(ord(_snake_case ) ): ref_id.append(_snake_case ) ref_ids.append(_snake_case ) assert len(_snake_case ) == len(_snake_case ) return ref_ids def _snake_case ( _snake_case : List[str] ) -> Dict: '''simple docstring''' with open(args.file_name , 'r' , encoding='utf-8' ) as f: _A = f.readlines() _A = [line.strip() for line in data if len(_snake_case ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' _A = LTP(args.ltp ) # faster in GPU device _A = BertTokenizer.from_pretrained(args.bert ) _A = prepare_ref(_snake_case , _snake_case , _snake_case ) with open(args.save_path , 'w' , encoding='utf-8' ) as f: _A = [json.dumps(_snake_case ) + '\n' for ref in ref_ids] f.writelines(_snake_case ) if __name__ == "__main__": a = argparse.ArgumentParser(description='''prepare_chinese_ref''') parser.add_argument( '''--file_name''', type=str, default='''./resources/chinese-demo.txt''', help='''file need process, same as training data in lm''', ) parser.add_argument( '''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path''' ) parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''') parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''') a = parser.parse_args() main(args)
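A hedged usage sketch for prepare_ref above; the LTP resource path and tokenizer id are placeholders (the script's own defaults point at ./resources/ltp and a local BERT directory):

from ltp import LTP
from transformers import BertTokenizer

lines = ["这是一个测试句子"]                      # hypothetical input line
ltp_tokenizer = LTP("./resources/ltp")            # placeholder resource path
bert_tokenizer = BertTokenizer.from_pretrained("bert-base-chinese")  # placeholder model id
ref_ids = prepare_ref(lines, ltp_tokenizer, bert_tokenizer)
# ref_ids[0] lists positions of "##" subwords that sit inside LTP-segmented Chinese words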
style_context_codestyle: 271
label: 1


Row 3
code:
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation


def password_generator(length: int = 8) -> str:
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


# Password Generator = full boot with random_number, random_letters, and
# random_character FUNCTIONS
def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Put your code here...
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char
    # Passwords should contain UPPERCASE, lowercase, numbers, and special characters


def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print("Alternative Password generated:", alternative_password_generator(chars_incl, length))
    print("[If you are thinking of using this password, You better save it.]")


if __name__ == "__main__":
    main()
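A short usage sketch; note the module draws characters from secrets (the OS CSPRNG) rather than random, with random.shuffle only reordering them:

pwd = password_generator(12)
print(pwd, is_strong_password(pwd))
# 12-character password guaranteed to contain every character of "@Ab1":
print(alternative_password_generator("@Ab1", 12))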
code_codestyle: 233

style_context:
__version__ = "0.21.0"

from .accelerator import Accelerator
from .big_modeling import (
    cpu_offload,
    cpu_offload_with_hook,
    disk_offload,
    dispatch_model,
    init_empty_weights,
    init_on_device,
    load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
    DeepSpeedPlugin,
    DistributedDataParallelKwargs,
    DistributedType,
    FullyShardedDataParallelPlugin,
    GradScalerKwargs,
    InitProcessGroupKwargs,
    find_executable_batch_size,
    infer_auto_device_map,
    is_rich_available,
    load_checkpoint_in_model,
    synchronize_rng_states,
)

if is_rich_available():
    from .utils import rich
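A hedged sketch of the standard training-loop setup these exports support, assuming the package is installed as accelerate; the toy model, optimizer, and dataloader stand in for real training objects:

import torch
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(4, 2)  # toy stand-ins for real training objects
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
dataset = torch.utils.data.TensorDataset(torch.randn(8, 4), torch.randn(8, 2))
dataloader = torch.utils.data.DataLoader(dataset, batch_size=4)

model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
for x, y in dataloader:
    loss = torch.nn.functional.mse_loss(model(x), y)
    accelerator.backward(loss)  # replaces loss.backward() so mixed precision / DDP keep working
    optimizer.step()
    optimizer.zero_grad()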
style_context_codestyle: 233
label: 1


Row 4
code:
"""simple docstring""" import warnings from ...utils import logging from .image_processing_mobilevit import MobileViTImageProcessor A_ = logging.get_logger(__name__) class lowercase( __a ): '''simple docstring''' def __init__( self: int, *a_: Optional[Any], **a_: Union[str, Any] ): '''simple docstring''' warnings.warn( """The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use MobileViTImageProcessor instead.""", a_, ) super().__init__(*a_, **a_ )
code_codestyle: 132

style_context:
"""simple docstring""" import os from pickle import UnpicklingError from typing import Dict, Tuple import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict import transformers from .utils import logging A_ = logging.get_logger(__name__) def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Dict , snake_case__ : Union[str, Any] , snake_case__ : int=False ): """simple docstring""" try: import torch # noqa: F401 except ImportError: logger.error( """Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see""" """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation""" """ instructions.""" ) raise if not is_sharded: _snake_case : Dict = os.path.abspath(snake_case__ ) logger.info(F"Loading PyTorch weights from {pt_path}" ) _snake_case : Tuple = torch.load(snake_case__ , map_location="""cpu""" ) logger.info(F"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters." ) _snake_case : int = convert_pytorch_state_dict_to_flax(snake_case__ , snake_case__ ) else: # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files _snake_case : Dict = convert_pytorch_sharded_state_dict_to_flax(snake_case__ , snake_case__ ) return flax_state_dict def UpperCAmelCase__ (snake_case__ : Tuple[str] , snake_case__ : np.ndarray , snake_case__ : Dict[str, jnp.ndarray] , snake_case__ : str , ): """simple docstring""" def is_key_or_prefix_key_in_dict(snake_case__ : Tuple[str] ) -> bool: return len(set(snake_case__ ) & {key, (model_prefix,) + key} ) > 0 # layer norm _snake_case : Any = pt_tuple_key[:-1] + ("""scale""",) if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # batch norm layer mean _snake_case : Optional[Any] = pt_tuple_key[:-1] + ("""mean""",) if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # batch norm layer var _snake_case : Any = pt_tuple_key[:-1] + ("""var""",) if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # embedding _snake_case : Any = pt_tuple_key[:-1] + ("""embedding""",) if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # conv layer _snake_case : Optional[int] = pt_tuple_key[:-1] + ("""kernel""",) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(snake_case__ ): _snake_case : Dict = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer _snake_case : List[str] = pt_tuple_key[:-1] + ("""kernel""",) if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(snake_case__ ): _snake_case : List[Any] = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight _snake_case : List[Any] = pt_tuple_key[:-1] + ("""weight""",) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias _snake_case : Tuple = pt_tuple_key[:-1] + ("""bias""",) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 _snake_case : Optional[Any] = None if pt_tuple_key[-3::2] == ("parametrizations", "original0"): _snake_case : 
Union[str, Any] = pt_tuple_key[-2] + """_g""" elif pt_tuple_key[-3::2] == ("parametrizations", "original1"): _snake_case : Dict = pt_tuple_key[-2] + """_v""" if name is not None: _snake_case : Union[str, Any] = pt_tuple_key[:-3] + (name,) return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def UpperCAmelCase__ (snake_case__ : Union[str, Any] , snake_case__ : Optional[int] ): """simple docstring""" _snake_case : Tuple = {k: v.numpy() for k, v in pt_state_dict.items()} _snake_case : int = flax_model.base_model_prefix # use params dict if the model contains batch norm layers if "params" in flax_model.params: _snake_case : Dict = flax_model.params["""params"""] else: _snake_case : List[Any] = flax_model.params _snake_case : Tuple = flatten_dict(snake_case__ ) # add batch_stats keys,values to dict if "batch_stats" in flax_model.params: _snake_case : Union[str, Any] = flatten_dict(flax_model.params["""batch_stats"""] ) random_flax_state_dict.update(snake_case__ ) _snake_case : Tuple = {} _snake_case : Dict = (model_prefix not in flax_model_params) and ( model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()} ) _snake_case : Optional[int] = (model_prefix in flax_model_params) and ( model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): _snake_case : int = tuple(pt_key.split(""".""" ) ) # remove base model prefix if necessary _snake_case : Optional[Any] = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: _snake_case : Union[str, Any] = pt_tuple_key[1:] # Correctly rename weight parameters _snake_case , _snake_case : int = rename_key_and_reshape_tensor( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # add model prefix if necessary _snake_case : Dict = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: _snake_case : int = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape " F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." 
) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1] or "var" in flax_key[-1]: _snake_case : Union[str, Any] = jnp.asarray(snake_case__ ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(snake_case__ , snake_case__ ) continue # also add unexpected weight so that warning is thrown _snake_case : List[Any] = jnp.asarray(snake_case__ ) else: # also add unexpected weight so that warning is thrown _snake_case : Optional[Any] = jnp.asarray(snake_case__ ) return unflatten_dict(snake_case__ ) def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : Dict ): """simple docstring""" import torch # Load the index _snake_case : str = {} for shard_file in shard_filenames: # load using msgpack utils _snake_case : Union[str, Any] = torch.load(snake_case__ ) _snake_case : Optional[Any] = {k: v.numpy() for k, v in pt_state_dict.items()} _snake_case : List[str] = flax_model.base_model_prefix # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict if "batch_stats" in flax_model.params: _snake_case : str = flax_model.params["""params"""] _snake_case : List[Any] = flatten_dict(snake_case__ ) random_flax_state_dict.update(flatten_dict(flax_model.params["""batch_stats"""] ) ) else: _snake_case : List[Any] = flax_model.params _snake_case : Tuple = flatten_dict(snake_case__ ) _snake_case : Tuple = (model_prefix not in flax_model_params) and ( model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()} ) _snake_case : Optional[Any] = (model_prefix in flax_model_params) and ( model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): _snake_case : List[str] = tuple(pt_key.split(""".""" ) ) # remove base model prefix if necessary _snake_case : str = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: _snake_case : Optional[Any] = pt_tuple_key[1:] # Correctly rename weight parameters _snake_case , _snake_case : Optional[Any] = rename_key_and_reshape_tensor( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # add model prefix if necessary _snake_case : List[str] = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: _snake_case : Any = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape " F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." 
) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1]: _snake_case : Optional[int] = jnp.asarray(snake_case__ ) continue if "var" in flax_key[-1]: _snake_case : Any = jnp.asarray(snake_case__ ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(snake_case__ , snake_case__ ) continue # also add unexpected weight so that warning is thrown _snake_case : List[str] = jnp.asarray(snake_case__ ) else: # also add unexpected weight so that warning is thrown _snake_case : Optional[Any] = jnp.asarray(snake_case__ ) return unflatten_dict(snake_case__ ) def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Optional[Any] ): """simple docstring""" _snake_case : Optional[Any] = os.path.abspath(snake_case__ ) logger.info(F"Loading Flax weights from {flax_checkpoint_path}" ) # import correct flax class _snake_case : Union[str, Any] = getattr(snake_case__ , """Flax""" + model.__class__.__name__ ) # load flax weight dict with open(snake_case__ , """rb""" ) as state_f: try: _snake_case : Dict = from_bytes(snake_case__ , state_f.read() ) except UnpicklingError: raise EnvironmentError(F"Unable to convert {flax_checkpoint_path} to Flax deserializable object. " ) return load_flax_weights_in_pytorch_model(snake_case__ , snake_case__ ) def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : Optional[int] ): """simple docstring""" try: import torch # noqa: F401 except ImportError: logger.error( """Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see""" """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation""" """ instructions.""" ) raise # check if we have bf16 weights _snake_case : Optional[int] = flatten_dict(jax.tree_util.tree_map(lambda snake_case__ : x.dtype == jnp.bfloataa , snake_case__ ) ).values() if any(snake_case__ ): # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( """Found ``bfloat16`` weights in Flax model. 
Casting all ``bfloat16`` weights to ``float32`` """ """before loading those in PyTorch model.""" ) _snake_case : Optional[int] = jax.tree_util.tree_map( lambda snake_case__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , snake_case__ ) _snake_case : Dict = flatten_dict(snake_case__ ) _snake_case : Optional[Any] = pt_model.state_dict() _snake_case : Union[str, Any] = (pt_model.base_model_prefix in flax_state) and ( pt_model.base_model_prefix not in {k.split(""".""" )[0] for k in pt_model_dict.keys()} ) _snake_case : Optional[int] = (pt_model.base_model_prefix not in flax_state) and ( pt_model.base_model_prefix in {k.split(""".""" )[0] for k in pt_model_dict.keys()} ) # keep track of unexpected & missing keys _snake_case : str = [] _snake_case : Tuple = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): _snake_case : Tuple = flax_key_tuple[0] == pt_model.base_model_prefix _snake_case : Optional[Any] = """.""".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict # adapt flax_key to prepare for loading from/to base model only if load_model_with_head_into_base_model and has_base_model_prefix: _snake_case : List[str] = flax_key_tuple[1:] elif load_base_model_into_model_with_head and require_base_model_prefix: _snake_case : Union[str, Any] = (pt_model.base_model_prefix,) + flax_key_tuple # rename flax weights to PyTorch format if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(snake_case__ ) not in pt_model_dict: # conv layer _snake_case : Union[str, Any] = flax_key_tuple[:-1] + ("""weight""",) _snake_case : Optional[int] = jnp.transpose(snake_case__ , (3, 2, 0, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(snake_case__ ) not in pt_model_dict: # linear layer _snake_case : Optional[int] = flax_key_tuple[:-1] + ("""weight""",) _snake_case : Union[str, Any] = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: _snake_case : int = flax_key_tuple[:-1] + ("""weight""",) # adding batch stats from flax batch norm to pt elif "mean" in flax_key_tuple[-1]: _snake_case : Tuple = flax_key_tuple[:-1] + ("""running_mean""",) elif "var" in flax_key_tuple[-1]: _snake_case : Optional[int] = flax_key_tuple[:-1] + ("""running_var""",) if "batch_stats" in flax_state: _snake_case : int = """.""".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header else: _snake_case : int = """.""".join(snake_case__ ) # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation. _snake_case : Optional[Any] = {} # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 for key in pt_model_dict: _snake_case : List[str] = key.split(""".""" ) _snake_case : Optional[int] = None if key_components[-3::2] == ["parametrizations", "original0"]: _snake_case : int = key_components[-2] + """_g""" elif key_components[-3::2] == ["parametrizations", "original1"]: _snake_case : Union[str, Any] = key_components[-2] + """_v""" if name is not None: _snake_case : Dict = key_components[:-3] + [name] _snake_case : Dict = """.""".join(snake_case__ ) _snake_case : str = key if flax_key in special_pt_names: _snake_case : Union[str, Any] = special_pt_names[flax_key] if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( F"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected " F"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." 
) else: # add weight to pytorch dict _snake_case : List[str] = np.asarray(snake_case__ ) if not isinstance(snake_case__ , np.ndarray ) else flax_tensor _snake_case : List[Any] = torch.from_numpy(snake_case__ ) # remove from missing keys missing_keys.remove(snake_case__ ) else: # weight is not expected by PyTorch model unexpected_keys.append(snake_case__ ) pt_model.load_state_dict(snake_case__ ) # re-transform missing_keys to list _snake_case : List[str] = list(snake_case__ ) if len(snake_case__ ) > 0: logger.warning( """Some weights of the Flax model were not used when initializing the PyTorch model""" F" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing" F" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture" """ (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This""" F" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect" """ to be exactly identical (e.g. initializing a BertForSequenceClassification model from a""" """ FlaxBertForSequenceClassification model).""" ) else: logger.warning(F"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n" ) if len(snake_case__ ) > 0: logger.warning( F"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly" F" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to" """ use it for predictions and inference.""" ) else: logger.warning( F"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n" """If your task is similar to the task the model of the checkpoint was trained on, """ F"you can already use {pt_model.__class__.__name__} for predictions without further training." ) return pt_model
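The layout conventions driving the conversion functions above deserve one concrete line: PyTorch stores convolution kernels as (out_ch, in_ch, kh, kw) while Flax expects (kh, kw, in_ch, out_ch), hence transpose(2, 3, 1, 0) on the PT-to-Flax path and the inverse (3, 2, 0, 1) on the way back; linear kernels are plain transposes. A small numpy check:

import numpy as np

pt_kernel = np.zeros((8, 3, 5, 5))             # (out_ch, in_ch, kh, kw)
flax_kernel = pt_kernel.transpose(2, 3, 1, 0)  # (kh, kw, in_ch, out_ch)
assert flax_kernel.shape == (5, 5, 3, 8)
assert flax_kernel.transpose(3, 2, 0, 1).shape == pt_kernel.shape  # Flax -> PT inverse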
style_context_codestyle: 132
label: 1


Row 5
code:
'''simple docstring''' import torch from torch import nn class _UpperCamelCase ( nn.Module ): '''simple docstring''' def __init__( self : List[str] , _lowerCAmelCase : Tuple , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int]=1 , _lowerCAmelCase : int=False): '''simple docstring''' super().__init__() __lowercase =n_token __lowercase =d_embed __lowercase =d_proj __lowercase =cutoffs + [n_token] __lowercase =[0] + self.cutoffs __lowercase =div_val __lowercase =self.cutoffs[0] __lowercase =len(self.cutoffs) - 1 __lowercase =self.shortlist_size + self.n_clusters if self.n_clusters > 0: __lowercase =nn.Parameter(torch.zeros(self.n_clusters , self.d_embed)) __lowercase =nn.Parameter(torch.zeros(self.n_clusters)) __lowercase =nn.ModuleList() __lowercase =nn.ParameterList() if div_val == 1: for i in range(len(self.cutoffs)): if d_proj != d_embed: self.out_projs.append(nn.Parameter(torch.FloatTensor(_lowerCAmelCase , _lowerCAmelCase))) else: self.out_projs.append(_lowerCAmelCase) self.out_layers.append(nn.Linear(_lowerCAmelCase , _lowerCAmelCase)) else: for i in range(len(self.cutoffs)): __lowercase , __lowercase =self.cutoff_ends[i], self.cutoff_ends[i + 1] __lowercase =d_embed // (div_val**i) self.out_projs.append(nn.Parameter(torch.FloatTensor(_lowerCAmelCase , _lowerCAmelCase))) self.out_layers.append(nn.Linear(_lowerCAmelCase , r_idx - l_idx)) __lowercase =keep_order def __lowerCamelCase ( self : List[str] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Dict): '''simple docstring''' if proj is None: __lowercase =nn.functional.linear(_lowerCAmelCase , _lowerCAmelCase , bias=_lowerCAmelCase) else: # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1: __lowercase =nn.functional.linear(_lowerCAmelCase , proj.t().contiguous()) __lowercase =nn.functional.linear(_lowerCAmelCase , _lowerCAmelCase , bias=_lowerCAmelCase) # else: # logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t())) # if bias is not None: # logit = logit + bias return logit def __lowerCamelCase ( self : Tuple , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any]=None , _lowerCAmelCase : List[str]=False): '''simple docstring''' if labels is not None: # Shift so that tokens < n predict n __lowercase =hidden[..., :-1, :].contiguous() __lowercase =labels[..., 1:].contiguous() __lowercase =hidden.view(-1 , hidden.size(-1)) __lowercase =labels.view(-1) if hidden.size(0) != labels.size(0): raise RuntimeError('Input and labels should have the same size in the batch dimension.') else: __lowercase =hidden.view(-1 , hidden.size(-1)) if self.n_clusters == 0: __lowercase =self._compute_logit(_lowerCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0]) if labels is not None: __lowercase =labels != -1_0_0 __lowercase =torch.zeros_like(_lowerCAmelCase , dtype=hidden.dtype , device=hidden.device) __lowercase =( -nn.functional.log_softmax(_lowerCAmelCase , dim=-1)[mask].gather(1 , labels[mask].unsqueeze(1)).squeeze(1) ) else: __lowercase =nn.functional.log_softmax(_lowerCAmelCase , dim=-1) else: # construct weights and biases __lowercase , __lowercase =[], [] for i in range(len(self.cutoffs)): if self.div_val == 1: __lowercase , __lowercase =self.cutoff_ends[i], self.cutoff_ends[i + 1] __lowercase =self.out_layers[0].weight[l_idx:r_idx] __lowercase =self.out_layers[0].bias[l_idx:r_idx] else: __lowercase =self.out_layers[i].weight __lowercase =self.out_layers[i].bias if i == 0: __lowercase 
=torch.cat([weight_i, self.cluster_weight] , dim=0) __lowercase =torch.cat([bias_i, self.cluster_bias] , dim=0) weights.append(_lowerCAmelCase) biases.append(_lowerCAmelCase) __lowercase , __lowercase , __lowercase =weights[0], biases[0], self.out_projs[0] __lowercase =self._compute_logit(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase) __lowercase =nn.functional.log_softmax(_lowerCAmelCase , dim=1) if labels is None: __lowercase =hidden.new_empty((head_logit.size(0), self.n_token)) else: __lowercase =torch.zeros_like(_lowerCAmelCase , dtype=hidden.dtype , device=hidden.device) __lowercase =0 __lowercase =[0] + self.cutoffs for i in range(len(_lowerCAmelCase) - 1): __lowercase , __lowercase =cutoff_values[i], cutoff_values[i + 1] if labels is not None: __lowercase =(labels >= l_idx) & (labels < r_idx) __lowercase =mask_i.nonzero().squeeze() if indices_i.numel() == 0: continue __lowercase =labels.index_select(0 , _lowerCAmelCase) - l_idx __lowercase =head_logprob.index_select(0 , _lowerCAmelCase) __lowercase =hidden.index_select(0 , _lowerCAmelCase) else: __lowercase =hidden if i == 0: if labels is not None: __lowercase =head_logprob_i.gather(1 , target_i[:, None]).squeeze(1) else: __lowercase =head_logprob[:, : self.cutoffs[0]] else: __lowercase , __lowercase , __lowercase =weights[i], biases[i], self.out_projs[i] __lowercase =self._compute_logit(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase) __lowercase =nn.functional.log_softmax(_lowerCAmelCase , dim=1) __lowercase =self.cutoffs[0] + i - 1 # No probability for the head cluster if labels is not None: __lowercase =head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather( 1 , target_i[:, None]).squeeze(1) else: __lowercase =head_logprob[:, cluster_prob_idx, None] + tail_logprob_i __lowercase =logprob_i if labels is not None: if (hasattr(self , 'keep_order') and self.keep_order) or keep_order: out.index_copy_(0 , _lowerCAmelCase , -logprob_i) else: out[offset : offset + logprob_i.size(0)].copy_(-logprob_i) offset += logprob_i.size(0) return out def __lowerCamelCase ( self : Dict , _lowerCAmelCase : Dict): '''simple docstring''' if self.n_clusters == 0: __lowercase =self._compute_logit(_lowerCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0]) return nn.functional.log_softmax(_lowerCAmelCase , dim=-1) else: # construct weights and biases __lowercase , __lowercase =[], [] for i in range(len(self.cutoffs)): if self.div_val == 1: __lowercase , __lowercase =self.cutoff_ends[i], self.cutoff_ends[i + 1] __lowercase =self.out_layers[0].weight[l_idx:r_idx] __lowercase =self.out_layers[0].bias[l_idx:r_idx] else: __lowercase =self.out_layers[i].weight __lowercase =self.out_layers[i].bias if i == 0: __lowercase =torch.cat([weight_i, self.cluster_weight] , dim=0) __lowercase =torch.cat([bias_i, self.cluster_bias] , dim=0) weights.append(_lowerCAmelCase) biases.append(_lowerCAmelCase) __lowercase , __lowercase , __lowercase =weights[0], biases[0], self.out_projs[0] __lowercase =self._compute_logit(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase) __lowercase =hidden.new_empty((head_logit.size(0), self.n_token)) __lowercase =nn.functional.log_softmax(_lowerCAmelCase , dim=1) __lowercase =[0] + self.cutoffs for i in range(len(_lowerCAmelCase) - 1): __lowercase , __lowercase =cutoff_values[i], cutoff_values[i + 1] if i == 0: __lowercase =head_logprob[:, : self.cutoffs[0]] else: __lowercase , __lowercase , __lowercase =weights[i], biases[i], self.out_projs[i] 
__lowercase =self._compute_logit(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase) __lowercase =nn.functional.log_softmax(_lowerCAmelCase , dim=1) __lowercase =head_logprob[:, -i] + tail_logprob_i __lowercase =logprob_i return out
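This module is recognizable as Transfo-XL's adaptive softmax; the class name below (ProjectedAdaptiveLogSoftmax, its name in upstream Transformers) is an assumption, since the name in this row is obfuscated. A hedged usage sketch:

import torch

# name assumed from upstream Transformers; constructor arguments follow the __init__ above
adaptive_softmax = ProjectedAdaptiveLogSoftmax(
    n_token=10_000, d_embed=128, d_proj=128, cutoffs=[1_000, 5_000], div_val=2
)
hidden = torch.randn(4, 16, 128)            # (batch, seq, d_proj)
labels = torch.randint(0, 10_000, (4, 16))
nll = adaptive_softmax(hidden, labels)      # per-token negative log-likelihood; inputs shifted by one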
code_codestyle: 166

style_context:
'''simple docstring'''
from typing import List

import numpy as np


def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return the number of possible shards according to the input gen_kwargs."""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    """Spread num_shards contiguously over at most max_num_jobs groups, remainder to the first groups."""
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split gen_kwargs into at most max_num_jobs gen_kwargs, sharding every list value."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    """Inverse of _split_gen_kwargs: concatenate the list values back together."""
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    """Return a shuffled copy of gen_kwargs; lists of the same size share one permutation."""
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
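Two hedged examples of the distribution rule above: shards are handed out as contiguous ranges, the remainder goes to the earliest groups, and empty groups are simply dropped:

assert _distribute_shards(num_shards=5, max_num_jobs=2) == [range(0, 3), range(3, 5)]
assert _distribute_shards(num_shards=2, max_num_jobs=4) == [range(0, 1), range(1, 2)]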
style_context_codestyle: 166
label: 1


Row 6
code:
"""simple docstring""" import argparse import json import os from collections import OrderedDict import numpy as np import tensorflow as tf import torch def snake_case__ ( __lowerCamelCase : Any ): """simple docstring""" lowerCamelCase__ : Tuple =os.path.join(args.tf_model_dir , '''parameters.json''' ) lowerCamelCase__ : List[str] =json.loads(open(__lowerCamelCase ).read() ) if not params: raise ValueError( f'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' ) if not args.output.endswith('''.pt''' ): lowerCamelCase__ : int =args.output + '''.pt''' lowerCamelCase__ : Tuple =OrderedDict() with tf.device('''/CPU:0''' ): lowerCamelCase__ : str =tf.train.load_checkpoint(args.tf_model_dir ) lowerCamelCase__ : List[Any] =reader.get_variable_to_shape_map() for key_name in shapes.keys(): lowerCamelCase__ : Optional[int] =reader.get_tensor(__lowerCamelCase ).astype(np.floataa ) if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ): continue if key_name.startswith('''pasts/''' ): if key_name.startswith('''pasts/mlp''' ): lowerCamelCase__ : List[Any] =int(key_name[9] ) elif key_name.startswith('''pasts/out''' ): lowerCamelCase__ : List[Any] =8 lowerCamelCase__ : str ='''model.sqout.%d.weight''' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time lowerCamelCase__ : str =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowerCamelCase__ : Any =torch.tensor(__lowerCamelCase ) elif key_name.startswith('''model/moe''' ): lowerCamelCase__ : List[str] =int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/switch_gating/kernel''' ): lowerCamelCase__ : Tuple ='''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player lowerCamelCase__ : Union[str, Any] =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowerCamelCase__ : Dict =torch.tensor(__lowerCamelCase ) elif key_name.endswith('''/softmlp/kernel''' ): lowerCamelCase__ : Union[str, Any] ='''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player lowerCamelCase__ : Tuple =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowerCamelCase__ : str =torch.tensor(__lowerCamelCase ) elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ): lowerCamelCase__ : str =key_name[-9:-7] for i in range(16 ): lowerCamelCase__ : str ='''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer) lowerCamelCase__ : Union[str, Any] =( vnp[i].transpose([1, 0] ).copy() ) # In Mesh-Tensorflow, it is one array, so it is divided lowerCamelCase__ : Union[str, Any] =torch.tensor(__lowerCamelCase ) elif key_name.startswith('''model/mlp''' ): lowerCamelCase__ : Optional[int] =int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/p1/kernel''' ): lowerCamelCase__ : int ='''model.blocks.%d.feed_forward.mlp.wi.weight''' % player lowerCamelCase__ : int =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowerCamelCase__ : Tuple =torch.tensor(__lowerCamelCase ) elif key_name.endswith('''/p1/bias''' ): lowerCamelCase__ : List[str] ='''model.blocks.%d.feed_forward.mlp.wi.bias''' % player lowerCamelCase__ : List[str] =vnp.copy() # same because it is one dimensional lowerCamelCase__ : Union[str, Any] =torch.tensor(__lowerCamelCase ) elif key_name.endswith('''/p2/kernel''' ): lowerCamelCase__ : Tuple ='''model.blocks.%d.feed_forward.mlp.wo.weight''' % player lowerCamelCase__ : List[Any] =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal 
matrix lowerCamelCase__ : str =torch.tensor(__lowerCamelCase ) elif key_name.endswith('''/p2/bias''' ): lowerCamelCase__ : int ='''model.blocks.%d.feed_forward.mlp.wo.bias''' % player lowerCamelCase__ : Optional[Any] =vnp.copy() # same because it is one dimensional lowerCamelCase__ : Optional[Any] =torch.tensor(__lowerCamelCase ) elif key_name.startswith('''model/ln''' ): lowerCamelCase__ : str =int(key_name[8:].split('''/''' )[0] ) if key_name.endswith('''/b''' ): lowerCamelCase__ : List[str] ='''model.blocks.%d.feed_forward.norm.bias''' % player lowerCamelCase__ : Optional[Any] =vnp.copy() # same because it is one dimensional lowerCamelCase__ : Tuple =torch.tensor(__lowerCamelCase ) elif key_name.endswith('''/g''' ): lowerCamelCase__ : str ='''model.blocks.%d.feed_forward.norm.weight''' % player lowerCamelCase__ : Dict =vnp.copy() # same because it is one dimensional lowerCamelCase__ : Tuple =torch.tensor(__lowerCamelCase ) elif key_name.startswith('''model/att''' ): lowerCamelCase__ : Dict =int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/qkv/kernel''' ): lowerCamelCase__ : Dict =vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum lowerCamelCase__ : Union[str, Any] =state[:, 0, :, :] lowerCamelCase__ : Any =state[:, 1, :, :] lowerCamelCase__ : str =state[:, 2, :, :] lowerCamelCase__ : Optional[int] =( state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix lowerCamelCase__ : str =( state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix lowerCamelCase__ : Tuple =( state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix lowerCamelCase__ : Optional[int] ='''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player lowerCamelCase__ : str =torch.tensor(__lowerCamelCase ) lowerCamelCase__ : Dict ='''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player lowerCamelCase__ : Tuple =torch.tensor(__lowerCamelCase ) lowerCamelCase__ : Optional[int] ='''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player lowerCamelCase__ : Optional[Any] =torch.tensor(__lowerCamelCase ) elif key_name.endswith('''/o/kernel''' ): lowerCamelCase__ : Optional[Any] ='''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player lowerCamelCase__ : Optional[Any] =( vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy() ) # Mesh-Tensorflow is a diagonal matrix lowerCamelCase__ : Optional[int] =torch.tensor(__lowerCamelCase ) elif key_name.startswith('''model/an''' ): lowerCamelCase__ : Any =int(key_name[8:].split('''/''' )[0] ) if key_name.endswith('''/b''' ): lowerCamelCase__ : List[str] ='''model.blocks.%d.self_attn.norm.bias''' % player lowerCamelCase__ : str =vnp.copy() # same because it is one dimensional lowerCamelCase__ : Optional[Any] =torch.tensor(__lowerCamelCase ) elif key_name.endswith('''/g''' ): lowerCamelCase__ : Optional[Any] ='''model.blocks.%d.self_attn.norm.weight''' % player lowerCamelCase__ : Dict =vnp.copy() # same because it is one dimensional lowerCamelCase__ : str =torch.tensor(__lowerCamelCase ) elif ( key_name.startswith('''model/wte''' ) or key_name.startswith('''model/wpe''' ) or key_name.startswith('''model/ete''' ) ): lowerCamelCase__ : int ={'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[ 
key_name[-3:] ] lowerCamelCase__ : Optional[int] ='''model.%s.weight''' % nlayer lowerCamelCase__ : Dict =vnp.copy() # same in embedded lowerCamelCase__ : Dict =torch.tensor(__lowerCamelCase ) if key_name.startswith('''model/wte''' ): lowerCamelCase__ : List[Any] ='''lm_head.weight''' lowerCamelCase__ : Union[str, Any] =vnp.copy() # same in embedded lowerCamelCase__ : List[str] =torch.tensor(__lowerCamelCase ) elif key_name.startswith('''model/wob''' ): lowerCamelCase__ : int ='''final_logits_bias''' lowerCamelCase__ : List[str] =vnp.copy() # same in embedded lowerCamelCase__ : Union[str, Any] =state.reshape((1, -1) ) lowerCamelCase__ : List[str] =torch.tensor(__lowerCamelCase ) elif key_name == "model/dense/kernel": lowerCamelCase__ : List[str] ='''model.last_project.weight''' lowerCamelCase__ : Any =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowerCamelCase__ : str =torch.tensor(__lowerCamelCase ) elif key_name == "model/dense_1/bias": lowerCamelCase__ : Optional[int] ='''model.last_project.bias''' lowerCamelCase__ : List[str] =vnp.copy() # same because it is one dimensional lowerCamelCase__ : Any =torch.tensor(__lowerCamelCase ) torch.save(__lowerCamelCase , args.output ) if __name__ == "__main__": _lowercase : int = argparse.ArgumentParser( description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model") parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model") _lowercase : int = parser.parse_args() convert_tf_gptsan_to_pt(args)
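A minimal NumPy sketch of the fused-qkv split performed above; the shapes are illustrative assumptions, not values from any checkpoint. A Mesh-TensorFlow kernel of shape (d_model, 3, n_heads, head_dim) is sliced along axis 1 into q/k/v, the head axes are flattened, and the result is transposed into PyTorch's (out_features, in_features) layout.

import numpy as np

d_model, n_heads, head_dim = 8, 2, 4  # illustrative sizes
vnp = np.random.randn(d_model, 3, n_heads, head_dim).astype(np.float32)

# slice out q/k/v, flatten the head axes, transpose to (out, in)
q = vnp[:, 0].reshape(d_model, n_heads * head_dim).T.copy()
k = vnp[:, 1].reshape(d_model, n_heads * head_dim).T.copy()
v = vnp[:, 2].reshape(d_model, n_heads * head_dim).T.copy()
assert q.shape == (n_heads * head_dim, d_model)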
272
"""simple docstring""" def snake_case__ ( __lowerCamelCase : int ): """simple docstring""" lowerCamelCase__ : List[Any] =[0] * len(__lowerCamelCase ) lowerCamelCase__ : List[Any] =[] lowerCamelCase__ : List[Any] =[1] * len(__lowerCamelCase ) for values in graph.values(): for i in values: indegree[i] += 1 for i in range(len(__lowerCamelCase ) ): if indegree[i] == 0: queue.append(__lowerCamelCase ) while queue: lowerCamelCase__ : Tuple =queue.pop(0 ) for x in graph[vertex]: indegree[x] -= 1 if long_dist[vertex] + 1 > long_dist[x]: lowerCamelCase__ : Optional[Any] =long_dist[vertex] + 1 if indegree[x] == 0: queue.append(__lowerCamelCase ) print(max(__lowerCamelCase ) ) # Adjacency list of Graph _lowercase : Optional[Any] = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []} longest_distance(graph)
272
1
"""simple docstring""" import warnings from ...utils import logging from .image_processing_deformable_detr import DeformableDetrImageProcessor _A : List[str] = logging.get_logger(__name__) class a__ ( a_ ): def __init__( self , *_a , **_a ): warnings.warn( "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use DeformableDetrImageProcessor instead." , _a , ) super().__init__(*_a , **_a )
202
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) _A : Optional[int] = { """configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : Union[str, Any] = ["""LlamaTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : Dict = ["""LlamaTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : int = [ """LlamaForCausalLM""", """LlamaModel""", """LlamaPreTrainedModel""", """LlamaForSequenceClassification""", ] if TYPE_CHECKING: from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama import LlamaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama_fast import LlamaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel else: import sys _A : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
202
1
from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import ( BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ...utils.backbone_utils import BackboneMixin from .configuration_resnet import ResNetConfig _A = logging.get_logger(__name__) # General docstring _A = '''ResNetConfig''' # Base docstring _A = '''microsoft/resnet-50''' _A = [1, 2_048, 7, 7] # Image classification docstring _A = '''microsoft/resnet-50''' _A = '''tiger cat''' _A = [ '''microsoft/resnet-50''', # See all resnet models at https://huggingface.co/models?filter=resnet ] class lowercase_ ( nn.Module ): def __init__( self : List[str] , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int = 3 , __UpperCamelCase : int = 1 , __UpperCamelCase : str = "relu" ): """simple docstring""" super().__init__() UpperCamelCase_ = nn.Convad( _a , _a , kernel_size=_a , stride=_a , padding=kernel_size // 2 , bias=_a ) UpperCamelCase_ = nn.BatchNormad(_a ) UpperCamelCase_ = ACTaFN[activation] if activation is not None else nn.Identity() def lowerCamelCase_ ( self : Tuple , __UpperCamelCase : Tensor ): """simple docstring""" UpperCamelCase_ = self.convolution(_a ) UpperCamelCase_ = self.normalization(_a ) UpperCamelCase_ = self.activation(_a ) return hidden_state class lowercase_ ( nn.Module ): def __init__( self : Tuple , __UpperCamelCase : ResNetConfig ): """simple docstring""" super().__init__() UpperCamelCase_ = ResNetConvLayer( config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act ) UpperCamelCase_ = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 ) UpperCamelCase_ = config.num_channels def lowerCamelCase_ ( self : List[str] , __UpperCamelCase : Tensor ): """simple docstring""" UpperCamelCase_ = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( """Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" ) UpperCamelCase_ = self.embedder(_a ) UpperCamelCase_ = self.pooler(_a ) return embedding class lowercase_ ( nn.Module ): def __init__( self : List[str] , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int = 2 ): """simple docstring""" super().__init__() UpperCamelCase_ = nn.Convad(_a , _a , kernel_size=1 , stride=_a , bias=_a ) UpperCamelCase_ = nn.BatchNormad(_a ) def lowerCamelCase_ ( self : Tuple , __UpperCamelCase : Tensor ): """simple docstring""" UpperCamelCase_ = self.convolution(_a ) UpperCamelCase_ = self.normalization(_a ) return hidden_state class lowercase_ ( nn.Module ): def __init__( self : Any , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int = 1 , __UpperCamelCase : str = "relu" ): """simple docstring""" super().__init__() UpperCamelCase_ = in_channels != out_channels or stride != 1 UpperCamelCase_ = ( ResNetShortCut(_a , _a , stride=_a ) if should_apply_shortcut else nn.Identity() ) UpperCamelCase_ = nn.Sequential( ResNetConvLayer(_a , _a , stride=_a ) , ResNetConvLayer(_a , _a , activation=_a ) , ) UpperCamelCase_ = ACTaFN[activation] def lowerCamelCase_ ( self : List[str] , 
__UpperCamelCase : int ): """simple docstring""" UpperCamelCase_ = hidden_state UpperCamelCase_ = self.layer(_a ) UpperCamelCase_ = self.shortcut(_a ) hidden_state += residual UpperCamelCase_ = self.activation(_a ) return hidden_state class lowercase_ ( nn.Module ): def __init__( self : List[Any] , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int = 1 , __UpperCamelCase : str = "relu" , __UpperCamelCase : int = 4 ): """simple docstring""" super().__init__() UpperCamelCase_ = in_channels != out_channels or stride != 1 UpperCamelCase_ = out_channels // reduction UpperCamelCase_ = ( ResNetShortCut(_a , _a , stride=_a ) if should_apply_shortcut else nn.Identity() ) UpperCamelCase_ = nn.Sequential( ResNetConvLayer(_a , _a , kernel_size=1 ) , ResNetConvLayer(_a , _a , stride=_a ) , ResNetConvLayer(_a , _a , kernel_size=1 , activation=_a ) , ) UpperCamelCase_ = ACTaFN[activation] def lowerCamelCase_ ( self : Tuple , __UpperCamelCase : int ): """simple docstring""" UpperCamelCase_ = hidden_state UpperCamelCase_ = self.layer(_a ) UpperCamelCase_ = self.shortcut(_a ) hidden_state += residual UpperCamelCase_ = self.activation(_a ) return hidden_state class lowercase_ ( nn.Module ): def __init__( self : Optional[int] , __UpperCamelCase : ResNetConfig , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int = 2 , __UpperCamelCase : int = 2 , ): """simple docstring""" super().__init__() UpperCamelCase_ = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer UpperCamelCase_ = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer(_a , _a , stride=_a , activation=config.hidden_act ) , *[layer(_a , _a , activation=config.hidden_act ) for _ in range(depth - 1 )] , ) def lowerCamelCase_ ( self : Dict , __UpperCamelCase : Tensor ): """simple docstring""" UpperCamelCase_ = input for layer in self.layers: UpperCamelCase_ = layer(_a ) return hidden_state class lowercase_ ( nn.Module ): def __init__( self : Optional[Any] , __UpperCamelCase : ResNetConfig ): """simple docstring""" super().__init__() UpperCamelCase_ = nn.ModuleList([] ) # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input self.stages.append( ResNetStage( _a , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) UpperCamelCase_ = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(_a , config.depths[1:] ): self.stages.append(ResNetStage(_a , _a , _a , depth=_a ) ) def lowerCamelCase_ ( self : str , __UpperCamelCase : Tensor , __UpperCamelCase : bool = False , __UpperCamelCase : bool = True ): """simple docstring""" UpperCamelCase_ = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: UpperCamelCase_ = hidden_states + (hidden_state,) UpperCamelCase_ = stage_module(_a ) if output_hidden_states: UpperCamelCase_ = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention( last_hidden_state=_a , hidden_states=_a , ) class lowercase_ ( lowercase__ ): A__ : List[str] = ResNetConfig A__ : List[str] = '''resnet''' A__ : int = '''pixel_values''' A__ : int = True def lowerCamelCase_ ( self : Tuple , __UpperCamelCase : int ): """simple docstring""" if isinstance(_a , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode="""fan_out""" , 
nonlinearity="""relu""" ) elif isinstance(_a , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def lowerCamelCase_ ( self : str , __UpperCamelCase : int , __UpperCamelCase : List[str]=False ): """simple docstring""" if isinstance(_a , _a ): UpperCamelCase_ = value _A = R''' This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`ResNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. ''' _A = R''' Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. ''' @add_start_docstrings( """The bare ResNet model outputting raw features without any specific head on top.""" , lowercase__ , ) class lowercase_ ( lowercase__ ): def __init__( self : List[Any] , __UpperCamelCase : Tuple ): """simple docstring""" super().__init__(_a ) UpperCamelCase_ = config UpperCamelCase_ = ResNetEmbeddings(_a ) UpperCamelCase_ = ResNetEncoder(_a ) UpperCamelCase_ = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(_a ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=_a , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def lowerCamelCase_ ( self : Optional[int] , __UpperCamelCase : Tensor , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Optional[bool] = None ): """simple docstring""" UpperCamelCase_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) UpperCamelCase_ = return_dict if return_dict is not None else self.config.use_return_dict UpperCamelCase_ = self.embedder(_a ) UpperCamelCase_ = self.encoder( _a , output_hidden_states=_a , return_dict=_a ) UpperCamelCase_ = encoder_outputs[0] UpperCamelCase_ = self.pooler(_a ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=_a , pooler_output=_a , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( """ ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. 
""" , lowercase__ , ) class lowercase_ ( lowercase__ ): def __init__( self : Optional[Any] , __UpperCamelCase : Tuple ): """simple docstring""" super().__init__(_a ) UpperCamelCase_ = config.num_labels UpperCamelCase_ = ResNetModel(_a ) # classification head UpperCamelCase_ = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(_a ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_a , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def lowerCamelCase_ ( self : Dict , __UpperCamelCase : Optional[torch.FloatTensor] = None , __UpperCamelCase : Optional[torch.LongTensor] = None , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Optional[bool] = None , ): """simple docstring""" UpperCamelCase_ = return_dict if return_dict is not None else self.config.use_return_dict UpperCamelCase_ = self.resnet(_a , output_hidden_states=_a , return_dict=_a ) UpperCamelCase_ = outputs.pooler_output if return_dict else outputs[1] UpperCamelCase_ = self.classifier(_a ) UpperCamelCase_ = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: UpperCamelCase_ = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): UpperCamelCase_ = 'single_label_classification' else: UpperCamelCase_ = 'multi_label_classification' if self.config.problem_type == "regression": UpperCamelCase_ = MSELoss() if self.num_labels == 1: UpperCamelCase_ = loss_fct(logits.squeeze() , labels.squeeze() ) else: UpperCamelCase_ = loss_fct(_a , _a ) elif self.config.problem_type == "single_label_classification": UpperCamelCase_ = CrossEntropyLoss() UpperCamelCase_ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": UpperCamelCase_ = BCEWithLogitsLoss() UpperCamelCase_ = loss_fct(_a , _a ) if not return_dict: UpperCamelCase_ = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=_a , logits=_a , hidden_states=outputs.hidden_states ) @add_start_docstrings( """ ResNet backbone, to be used with frameworks like DETR and MaskFormer. 
""" , lowercase__ , ) class lowercase_ ( lowercase__ , lowercase__ ): def __init__( self : List[str] , __UpperCamelCase : Optional[int] ): """simple docstring""" super().__init__(_a ) super()._init_backbone(_a ) UpperCamelCase_ = [config.embedding_size] + config.hidden_sizes UpperCamelCase_ = ResNetEmbeddings(_a ) UpperCamelCase_ = ResNetEncoder(_a ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(_a ) @replace_return_docstrings(output_type=_a , config_class=_CONFIG_FOR_DOC ) def lowerCamelCase_ ( self : Optional[int] , __UpperCamelCase : Tensor , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Optional[bool] = None ): """simple docstring""" UpperCamelCase_ = return_dict if return_dict is not None else self.config.use_return_dict UpperCamelCase_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) UpperCamelCase_ = self.embedder(_a ) UpperCamelCase_ = self.encoder(_a , output_hidden_states=_a , return_dict=_a ) UpperCamelCase_ = outputs.hidden_states UpperCamelCase_ = () for idx, stage in enumerate(self.stage_names ): if stage in self.out_features: feature_maps += (hidden_states[idx],) if not return_dict: UpperCamelCase_ = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput( feature_maps=_a , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=_a , )
355
from math import pow, sqrt def lowerCamelCase__ ( *a__ : float ) -> bool: UpperCamelCase_ = len(a__ ) > 0 and all(value > 0.0 for value in a__ ) return UpperCamelCase_ def lowerCamelCase__ ( a__ : float , a__ : float ) -> float | ValueError: return ( round(sqrt(molar_mass_a / molar_mass_a ) , 6 ) if validate(a__ , a__ ) else ValueError("""Input Error: Molar mass values must be greater than 0.""" ) ) def lowerCamelCase__ ( a__ : float , a__ : float , a__ : float ) -> float | ValueError: return ( round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 ) if validate(a__ , a__ , a__ ) else ValueError( """Input Error: Molar mass and effusion rate values must be greater than 0.""" ) ) def lowerCamelCase__ ( a__ : float , a__ : float , a__ : float ) -> float | ValueError: return ( round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 ) if validate(a__ , a__ , a__ ) else ValueError( """Input Error: Molar mass and effusion rate values must be greater than 0.""" ) ) def lowerCamelCase__ ( a__ : float , a__ : float , a__ : float ) -> float | ValueError: return ( round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 ) if validate(a__ , a__ , a__ ) else ValueError( """Input Error: Molar mass and effusion rate values must be greater than 0.""" ) ) def lowerCamelCase__ ( a__ : float , a__ : float , a__ : float ) -> float | ValueError: return ( round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 ) if validate(a__ , a__ , a__ ) else ValueError( """Input Error: Molar mass and effusion rate values must be greater than 0.""" ) )
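A worked example of Graham's law as used above (rate1/rate2 = sqrt(M2/M1)); the molar masses for hydrogen and oxygen are illustrative reference values:

from math import sqrt

molar_mass_h2, molar_mass_o2 = 2.016, 31.998  # g/mol
ratio = round(sqrt(molar_mass_o2 / molar_mass_h2), 6)
print(ratio)  # ~3.984: hydrogen effuses roughly four times faster than oxygen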
261
0
from __future__ import annotations lowercase__ :Any = 1.60_21E-19 # units = C def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ): '''simple docstring''' if (conductivity, electron_conc, mobility).count(0 ) != 1: raise ValueError('''You cannot supply more or less than 2 values''' ) elif conductivity < 0: raise ValueError('''Conductivity cannot be negative''' ) elif electron_conc < 0: raise ValueError('''Electron concentration cannot be negative''' ) elif mobility < 0: raise ValueError('''Mobility cannot be negative''' ) elif conductivity == 0: return ( "conductivity", mobility * electron_conc * ELECTRON_CHARGE, ) elif electron_conc == 0: return ( "electron_conc", conductivity / (mobility * ELECTRON_CHARGE), ) else: return ( "mobility", conductivity / (electron_conc * ELECTRON_CHARGE), ) if __name__ == "__main__": import doctest doctest.testmod()
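A worked example of the carrier relation sigma = n * e * mu solved above: given conductivity and electron concentration, recover the mobility (the input values are illustrative):

ELECTRON_CHARGE = 1.6021e-19  # C

conductivity = 1000.0  # S/m
electron_conc = 1e20   # 1/m^3
mobility = conductivity / (electron_conc * ELECTRON_CHARGE)
print(mobility)  # ~62.42 m^2/(V*s), from sigma = n*e*mu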
101
import argparse import os import numpy as np import tensorflow as tf import torch from transformers import BertModel def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): '''simple docstring''' lowercase = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''') lowercase = ( ('''layer.''', '''layer_'''), ('''word_embeddings.weight''', '''word_embeddings'''), ('''position_embeddings.weight''', '''position_embeddings'''), ('''token_type_embeddings.weight''', '''token_type_embeddings'''), ('''.''', '''/'''), ('''LayerNorm/weight''', '''LayerNorm/gamma'''), ('''LayerNorm/bias''', '''LayerNorm/beta'''), ('''weight''', '''kernel'''), ) if not os.path.isdir(lowerCAmelCase__ ): os.makedirs(lowerCAmelCase__ ) lowercase = model.state_dict() def to_tf_var_name(lowerCAmelCase__ ): for patt, repl in iter(lowerCAmelCase__ ): lowercase = name.replace(lowerCAmelCase__ , lowerCAmelCase__ ) return f'bert/{name}' def create_tf_var(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): lowercase = tf.dtypes.as_dtype(tensor.dtype ) lowercase = tf.get_variable(dtype=lowerCAmelCase__ , shape=tensor.shape , name=lowerCAmelCase__ , initializer=tf.zeros_initializer() ) session.run(tf.variables_initializer([tf_var] ) ) session.run(lowerCAmelCase__ ) return tf_var tf.reset_default_graph() with tf.Session() as session: for var_name in state_dict: lowercase = to_tf_var_name(lowerCAmelCase__ ) lowercase = state_dict[var_name].numpy() if any(x in var_name for x in tensors_to_transpose ): lowercase = torch_tensor.T lowercase = create_tf_var(tensor=lowerCAmelCase__ , name=lowerCAmelCase__ , session=lowerCAmelCase__ ) tf.keras.backend.set_value(lowerCAmelCase__ , lowerCAmelCase__ ) lowercase = session.run(lowerCAmelCase__ ) print(f'Successfully created {tf_name}: {np.allclose(lowerCAmelCase__ , lowerCAmelCase__ )}' ) lowercase = tf.train.Saver(tf.trainable_variables() ) saver.save(lowerCAmelCase__ , os.path.join(lowerCAmelCase__ , model_name.replace('''-''' , '''_''' ) + '''.ckpt''' ) ) def UpperCamelCase ( lowerCAmelCase__=None ): '''simple docstring''' lowercase = argparse.ArgumentParser() parser.add_argument('''--model_name''' , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help='''model name e.g. bert-base-uncased''' ) parser.add_argument( '''--cache_dir''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , required=lowerCAmelCase__ , help='''Directory containing pytorch model''' ) parser.add_argument('''--pytorch_model_path''' , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help='''/path/to/<pytorch-model-name>.bin''' ) parser.add_argument('''--tf_cache_dir''' , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help='''Directory in which to save tensorflow model''' ) lowercase = parser.parse_args(lowerCAmelCase__ ) lowercase = BertModel.from_pretrained( pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , ) convert_pytorch_checkpoint_to_tf(model=lowerCAmelCase__ , ckpt_dir=args.tf_cache_dir , model_name=args.model_name ) if __name__ == "__main__": main()
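A minimal sketch of the ordered string-replacement renaming above, showing only the patterns a typical encoder weight hits (the input name is an illustrative example, not taken from a specific checkpoint):

var_map = (
    ("layer.", "layer_"),
    (".", "/"),
    ("LayerNorm/weight", "LayerNorm/gamma"),
    ("LayerNorm/bias", "LayerNorm/beta"),
    ("weight", "kernel"),
)

def to_tf_var_name(name: str) -> str:
    # replacements are applied in order, so "." -> "/" must precede "weight" -> "kernel"
    for patt, repl in var_map:
        name = name.replace(patt, repl)
    return f"bert/{name}"

print(to_tf_var_name("encoder.layer.0.attention.self.query.weight"))
# -> bert/encoder/layer_0/attention/self/query/kernel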
101
1
"""simple docstring""" from __future__ import annotations def _a ( _snake_case , _snake_case ): """simple docstring""" if nth_term == "": return [""] UpperCAmelCase = int(_SCREAMING_SNAKE_CASE ) UpperCAmelCase = int(_SCREAMING_SNAKE_CASE ) UpperCAmelCase = [] for temp in range(int(_SCREAMING_SNAKE_CASE ) ): series.append(F'''1 / {pow(temp + 1 , int(_SCREAMING_SNAKE_CASE ) )}''' if series else """1""" ) return series if __name__ == "__main__": import doctest doctest.testmod() _UpperCamelCase = int(input("""Enter the last number (nth term) of the P-Series""")) _UpperCamelCase = int(input("""Enter the power for P-Series""")) print("""Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p""") print(p_series(nth_term, power))
351
"""simple docstring""" import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel _UpperCamelCase = { """text_branch""": """text_model""", """audio_branch""": """audio_model.audio_encoder""", """attn""": """attention.self""", """self.proj""": """output.dense""", """attention.self_mask""": """attn_mask""", """mlp.fc1""": """intermediate.dense""", """mlp.fc2""": """output.dense""", """norm1""": """layernorm_before""", """norm2""": """layernorm_after""", """bn0""": """batch_norm""", } _UpperCamelCase = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""") def _a ( _snake_case , _snake_case=False ): """simple docstring""" UpperCAmelCase , UpperCAmelCase = create_model( """HTSAT-tiny""" , """roberta""" , _snake_case , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=_snake_case , fusion_type="""aff_2d""" if enable_fusion else None , ) return model, model_cfg def _a ( _snake_case ): """simple docstring""" UpperCAmelCase = {} UpperCAmelCase = R""".*sequential.(\d+).*""" UpperCAmelCase = R""".*_projection.(\d+).*""" for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: UpperCAmelCase = key.replace(_snake_case , _snake_case ) if re.match(_snake_case , _snake_case ): # replace sequential layers with list UpperCAmelCase = re.match(_snake_case , _snake_case ).group(1 ) UpperCAmelCase = key.replace(F'''sequential.{sequential_layer}.''' , F'''layers.{int(_snake_case )//3}.linear.''' ) elif re.match(_snake_case , _snake_case ): UpperCAmelCase = int(re.match(_snake_case , _snake_case ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... 
UpperCAmelCase = 1 if projecton_layer == 0 else 2 UpperCAmelCase = key.replace(F'''_projection.{projecton_layer}.''' , F'''_projection.linear{transformers_projection_layer}.''' ) if "audio" in key and "qkv" in key: # split qkv into query key and value UpperCAmelCase = value UpperCAmelCase = mixed_qkv.size(0 ) // 3 UpperCAmelCase = mixed_qkv[:qkv_dim] UpperCAmelCase = mixed_qkv[qkv_dim : qkv_dim * 2] UpperCAmelCase = mixed_qkv[qkv_dim * 2 :] UpperCAmelCase = query_layer UpperCAmelCase = key_layer UpperCAmelCase = value_layer else: UpperCAmelCase = value return model_state_dict def _a ( _snake_case , _snake_case , _snake_case , _snake_case=False ): """simple docstring""" UpperCAmelCase , UpperCAmelCase = init_clap(_snake_case , enable_fusion=_snake_case ) clap_model.eval() UpperCAmelCase = clap_model.state_dict() UpperCAmelCase = rename_state_dict(_snake_case ) UpperCAmelCase = ClapConfig() UpperCAmelCase = enable_fusion UpperCAmelCase = ClapModel(_snake_case ) # ignore the spectrogram embedding layer model.load_state_dict(_snake_case , strict=_snake_case ) model.save_pretrained(_snake_case ) transformers_config.save_pretrained(_snake_case ) if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""") _UpperCamelCase = parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
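A minimal sketch of the `sequential.N` -> `layers.N//3.linear` rename above (the division by 3 reflects CLAP packing three modules per projection layer into one nn.Sequential; the input key is an illustrative example):

import re

def rename_sequential(key: str) -> str:
    match = re.match(r".*sequential.(\d+).*", key)
    if match:
        n = int(match.group(1))
        key = key.replace(f"sequential.{n}.", f"layers.{n // 3}.linear.")
    return key

print(rename_sequential("text_branch.sequential.3.weight"))
# -> text_branch.layers.1.linear.weight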
234
0
'''simple docstring''' import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipaConfig, BlipaForConditionalGeneration, BlipaProcessor, BlipaVisionConfig, BlipImageProcessor, OPTConfig, TaConfig, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def __a(): '''simple docstring''' _lowerCAmelCase = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png" _lowerCAmelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw ).convert("RGB" ) return image def __a(SCREAMING_SNAKE_CASE_ : Optional[int] ): '''simple docstring''' _lowerCAmelCase = [] # fmt: off # vision encoder rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") ) rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") ) rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") ) rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") ) rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") ) rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') ) # QFormer rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight") ) rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias") ) # fmt: on return rename_keys def __a(SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] ): '''simple docstring''' _lowerCAmelCase = dct.pop(SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = val def __a(SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : int ): '''simple docstring''' for i in range(config.vision_config.num_hidden_layers ): # 
read in original q and v biases _lowerCAmelCase = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' ) _lowerCAmelCase = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' ) # next, set bias in the state dict _lowerCAmelCase = torch.cat((q_bias, torch.zeros_like(SCREAMING_SNAKE_CASE_ , requires_grad=SCREAMING_SNAKE_CASE_ ), v_bias) ) _lowerCAmelCase = qkv_bias def __a(SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str] ): '''simple docstring''' _lowerCAmelCase = 364 if "coco" in model_name else 224 _lowerCAmelCase = BlipaVisionConfig(image_size=SCREAMING_SNAKE_CASE_ ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "opt-2.7b" in model_name: _lowerCAmelCase = OPTConfig.from_pretrained("facebook/opt-2.7b" , eos_token_id=SCREAMING_SNAKE_CASE_ ).to_dict() elif "opt-6.7b" in model_name: _lowerCAmelCase = OPTConfig.from_pretrained("facebook/opt-6.7b" , eos_token_id=SCREAMING_SNAKE_CASE_ ).to_dict() elif "t5-xl" in model_name: _lowerCAmelCase = TaConfig.from_pretrained("google/flan-t5-xl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: _lowerCAmelCase = TaConfig.from_pretrained("google/flan-t5-xxl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict() _lowerCAmelCase = BlipaConfig(vision_config=SCREAMING_SNAKE_CASE_ , text_config=SCREAMING_SNAKE_CASE_ ) return config, image_size @torch.no_grad() def __a(SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str=None , SCREAMING_SNAKE_CASE_ : Union[str, Any]=False ): '''simple docstring''' _lowerCAmelCase = ( AutoTokenizer.from_pretrained("facebook/opt-2.7b" ) if "opt" in model_name else AutoTokenizer.from_pretrained("google/flan-t5-xl" ) ) _lowerCAmelCase = tokenizer("\n" , add_special_tokens=SCREAMING_SNAKE_CASE_ ).input_ids[0] _lowerCAmelCase , _lowerCAmelCase = get_blipa_config(SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = BlipaForConditionalGeneration(SCREAMING_SNAKE_CASE_ ).eval() _lowerCAmelCase = { "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"), "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"), "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"), "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"), "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"), "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"), "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"), } _lowerCAmelCase , _lowerCAmelCase = model_name_to_original[model_name] # load original model print("Loading original model..." ) _lowerCAmelCase = "cuda" if torch.cuda.is_available() else "cpu" _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = load_model_and_preprocess( name=SCREAMING_SNAKE_CASE_ , model_type=SCREAMING_SNAKE_CASE_ , is_eval=SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ ) original_model.eval() print("Done!" 
) # update state dict keys _lowerCAmelCase = original_model.state_dict() _lowerCAmelCase = create_rename_keys(SCREAMING_SNAKE_CASE_ ) for src, dest in rename_keys: rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): _lowerCAmelCase = state_dict.pop(SCREAMING_SNAKE_CASE_ ) if key.startswith("Qformer.bert" ): _lowerCAmelCase = key.replace("Qformer.bert" , "qformer" ) if "attention.self" in key: _lowerCAmelCase = key.replace("self" , "attention" ) if "opt_proj" in key: _lowerCAmelCase = key.replace("opt_proj" , "language_projection" ) if "t5_proj" in key: _lowerCAmelCase = key.replace("t5_proj" , "language_projection" ) if key.startswith("opt" ): _lowerCAmelCase = key.replace("opt" , "language" ) if key.startswith("t5" ): _lowerCAmelCase = key.replace("t5" , "language" ) _lowerCAmelCase = val # read in qv biases read_in_q_v_bias(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase , _lowerCAmelCase = hf_model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ ) assert len(SCREAMING_SNAKE_CASE_ ) == 0 assert unexpected_keys == ["qformer.embeddings.position_ids"] _lowerCAmelCase = load_demo_image() _lowerCAmelCase = vis_processors["eval"](SCREAMING_SNAKE_CASE_ ).unsqueeze(0 ).to(SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = tokenizer(["\n"] , return_tensors="pt" ).input_ids.to(SCREAMING_SNAKE_CASE_ ) # create processor _lowerCAmelCase = BlipImageProcessor( size={"height": image_size, "width": image_size} , image_mean=SCREAMING_SNAKE_CASE_ , image_std=SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = BlipaProcessor(image_processor=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="pt" ).pixel_values.to(SCREAMING_SNAKE_CASE_ ) # make sure processor creates exact same pixel values assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) original_model.to(SCREAMING_SNAKE_CASE_ ) hf_model.to(SCREAMING_SNAKE_CASE_ ) with torch.no_grad(): if "opt" in model_name: _lowerCAmelCase = original_model({"image": original_pixel_values, "text_input": [""]} ).logits _lowerCAmelCase = hf_model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).logits else: _lowerCAmelCase = original_model( {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]} ).logits _lowerCAmelCase = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 ) _lowerCAmelCase = hf_model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ).logits assert original_logits.shape == logits.shape print("First values of original logits:" , original_logits[0, :3, :3] ) print("First values of HF logits:" , logits[0, :3, :3] ) # assert values if model_name == "blip2-flan-t5-xl": _lowerCAmelCase = torch.tensor( [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=SCREAMING_SNAKE_CASE_ ) assert torch.allclose(logits[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) elif model_name == "blip2-flan-t5-xl-coco": _lowerCAmelCase = torch.tensor( [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=SCREAMING_SNAKE_CASE_ ) else: # cast to same type _lowerCAmelCase = logits.dtype assert torch.allclose(original_logits.to(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ , atol=1e-2 ) print("Looks ok!" ) print("Generating a caption..." 
) _lowerCAmelCase = "" _lowerCAmelCase = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors="pt" ).input_ids.to(SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = original_model.generate({"image": original_pixel_values} ) _lowerCAmelCase = hf_model.generate( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , do_sample=SCREAMING_SNAKE_CASE_ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , ) print("Original generation:" , SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = input_ids.shape[1] _lowerCAmelCase = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = [text.strip() for text in output_text] print("HF generation:" , SCREAMING_SNAKE_CASE_ ) if pytorch_dump_folder_path is not None: processor.save_pretrained(SCREAMING_SNAKE_CASE_ ) hf_model.save_pretrained(SCREAMING_SNAKE_CASE_ ) if push_to_hub: processor.push_to_hub(F'''nielsr/{model_name}''' ) hf_model.push_to_hub(F'''nielsr/{model_name}''' ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE = argparse.ArgumentParser() _SCREAMING_SNAKE_CASE = [ "blip2-opt-2.7b", "blip2-opt-6.7b", "blip2-opt-2.7b-coco", "blip2-opt-6.7b-coco", "blip2-flan-t5-xl", "blip2-flan-t5-xl-coco", "blip2-flan-t5-xxl", ] parser.add_argument( "--model_name", default="blip2-opt-2.7b", choices=choices, type=str, help="Path to hf config.json of model to convert", ) parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model and processor to the hub after converting", ) _SCREAMING_SNAKE_CASE = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
158
'''simple docstring''' import argparse import torch from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel from transformers.utils import logging logging.set_verbosity_info() def __a(SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : str ): '''simple docstring''' _lowerCAmelCase = FunnelConfig.from_json_file(SCREAMING_SNAKE_CASE_ ) print(F'''Building PyTorch model from configuration: {config}''' ) _lowerCAmelCase = FunnelBaseModel(SCREAMING_SNAKE_CASE_ ) if base_model else FunnelModel(SCREAMING_SNAKE_CASE_ ) # Load weights from tf checkpoint load_tf_weights_in_funnel(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--config_file", default=None, type=str, required=True, help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not." ) _SCREAMING_SNAKE_CASE = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model )
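An illustrative sketch of the dispatch the converter above performs: the --base_model flag selects the headless encoder versus the full model (the config here uses library defaults rather than a real config.json):

from transformers import FunnelBaseModel, FunnelConfig, FunnelModel

config = FunnelConfig()  # or FunnelConfig.from_json_file(<path to config.json>)
base_model = True
model = FunnelBaseModel(config) if base_model else FunnelModel(config)
print(type(model).__name__)  # FunnelBaseModel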
158
1
import datasets from .evaluate import evaluate _UpperCamelCase = '''\ @inproceedings{Rajpurkar2016SQuAD10, title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text}, author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang}, booktitle={EMNLP}, year={2016} } ''' _UpperCamelCase = ''' This metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD). Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span, from the corresponding reading passage, or the question might be unanswerable. ''' _UpperCamelCase = ''' Computes SQuAD scores (F1 and EM). Args: predictions: List of question-answers dictionaries with the following key-values: - \'id\': id of the question-answer pair as given in the references (see below) - \'prediction_text\': the text of the answer references: List of question-answers dictionaries with the following key-values: - \'id\': id of the question-answer pair (see above), - \'answers\': a Dict in the SQuAD dataset format { \'text\': list of possible texts for the answer, as a list of strings \'answer_start\': list of start positions for the answer, as a list of ints } Note that answer_start values are not taken into account to compute the metric. Returns: \'exact_match\': Exact match (the normalized answer exactly matches the gold answer) \'f1\': The F-score of predicted tokens versus the gold answer Examples: >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}] >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}] >>> squad_metric = datasets.load_metric("squad") >>> results = squad_metric.compute(predictions=predictions, references=references) >>> print(results) {\'exact_match\': 100.0, \'f1\': 100.0} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase ( datasets.Metric ): '''simple docstring''' def UpperCamelCase__ (self ) -> Union[str, Any]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': {'id': datasets.Value('string' ), 'prediction_text': datasets.Value('string' )}, 'references': { 'id': datasets.Value('string' ), 'answers': datasets.features.Sequence( { 'text': datasets.Value('string' ), 'answer_start': datasets.Value('int32' ), } ), }, } ) , codebase_urls=['https://rajpurkar.github.io/SQuAD-explorer/'] , reference_urls=['https://rajpurkar.github.io/SQuAD-explorer/'] , ) def UpperCamelCase__ (self , __a , __a ) -> List[Any]: """simple docstring""" UpperCAmelCase__ = {prediction['id']: prediction['prediction_text'] for prediction in predictions} UpperCAmelCase__ = [ { 'paragraphs': [ { 'qas': [ { 'answers': [{'text': answer_text} for answer_text in ref['answers']['text']], 'id': ref['id'], } for ref in references ] } ] } ] UpperCAmelCase__ = evaluate(dataset=__a , predictions=__a ) return score
335
from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class lowercase : '''simple docstring''' def __init__(self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=2 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=3 , __a=4 , __a=None , ) -> Tuple: """simple docstring""" UpperCAmelCase__ = parent UpperCAmelCase__ = 13 UpperCAmelCase__ = 7 UpperCAmelCase__ = True UpperCAmelCase__ = True UpperCAmelCase__ = True UpperCAmelCase__ = True UpperCAmelCase__ = 99 UpperCAmelCase__ = 384 UpperCAmelCase__ = 2 UpperCAmelCase__ = 4 UpperCAmelCase__ = 37 UpperCAmelCase__ = 'gelu' UpperCAmelCase__ = 0.1 UpperCAmelCase__ = 0.1 UpperCAmelCase__ = 512 UpperCAmelCase__ = 16 UpperCAmelCase__ = 2 UpperCAmelCase__ = 0.02 UpperCAmelCase__ = 3 UpperCAmelCase__ = 4 UpperCAmelCase__ = 128 UpperCAmelCase__ = 2 UpperCAmelCase__ = 9 UpperCAmelCase__ = 1 UpperCAmelCase__ = None def UpperCamelCase__ (self ) -> List[str]: """simple docstring""" UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase__ = None if self.use_input_mask: UpperCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase__ = None if self.use_token_type_ids: UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase__ = None UpperCAmelCase__ = None UpperCAmelCase__ = None if self.use_labels: UpperCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase__ = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__a , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Tuple: """simple docstring""" UpperCAmelCase__ = TFConvBertModel(config=__a ) UpperCAmelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} UpperCAmelCase__ = [input_ids, input_mask] UpperCAmelCase__ = model(__a ) UpperCAmelCase__ = model(__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Any: """simple docstring""" UpperCAmelCase__ = TFConvBertForMaskedLM(config=__a ) UpperCAmelCase__ = 
{ 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } UpperCAmelCase__ = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Optional[Any]: """simple docstring""" UpperCAmelCase__ = self.num_labels UpperCAmelCase__ = TFConvBertForSequenceClassification(config=__a ) UpperCAmelCase__ = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } UpperCAmelCase__ = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Optional[int]: """simple docstring""" UpperCAmelCase__ = self.num_choices UpperCAmelCase__ = TFConvBertForMultipleChoice(config=__a ) UpperCAmelCase__ = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) ) UpperCAmelCase__ = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) ) UpperCAmelCase__ = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) ) UpperCAmelCase__ = { 'input_ids': multiple_choice_inputs_ids, 'attention_mask': multiple_choice_input_mask, 'token_type_ids': multiple_choice_token_type_ids, } UpperCAmelCase__ = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase__ = self.num_labels UpperCAmelCase__ = TFConvBertForTokenClassification(config=__a ) UpperCAmelCase__ = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } UpperCAmelCase__ = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> List[Any]: """simple docstring""" UpperCAmelCase__ = TFConvBertForQuestionAnswering(config=__a ) UpperCAmelCase__ = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } UpperCAmelCase__ = model(__a ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase__ (self ) -> Optional[Any]: """simple docstring""" UpperCAmelCase__ = self.prepare_config_and_inputs() ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) = config_and_inputs UpperCAmelCase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class lowercase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ): '''simple docstring''' __SCREAMING_SNAKE_CASE = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) __SCREAMING_SNAKE_CASE = ( { """feature-extraction""": TFConvBertModel, """fill-mask""": TFConvBertForMaskedLM, """question-answering""": TFConvBertForQuestionAnswering, """text-classification""": TFConvBertForSequenceClassification, """token-classification""": TFConvBertForTokenClassification, """zero-shot""": TFConvBertForSequenceClassification, } if is_tf_available() else {} ) __SCREAMING_SNAKE_CASE = False 
__SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False def UpperCamelCase__ (self ) -> Optional[Any]: """simple docstring""" UpperCAmelCase__ = TFConvBertModelTester(self ) UpperCAmelCase__ = ConfigTester(self , config_class=__a , hidden_size=37 ) def UpperCamelCase__ (self ) -> List[Any]: """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase__ (self ) -> str: """simple docstring""" UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def UpperCamelCase__ (self ) -> List[Any]: """simple docstring""" UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__a ) def UpperCamelCase__ (self ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__a ) def UpperCamelCase__ (self ) -> Any: """simple docstring""" UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__a ) def UpperCamelCase__ (self ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__a ) def UpperCamelCase__ (self ) -> List[Any]: """simple docstring""" UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__a ) @slow def UpperCamelCase__ (self ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ = True UpperCAmelCase__ = True if hasattr(__a , 'use_cache' ): UpperCAmelCase__ = True UpperCAmelCase__ = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length ) UpperCAmelCase__ = getattr(self.model_tester , 'key_length' , __a ) for model_class in self.all_model_classes: UpperCAmelCase__ = self._prepare_for_class(__a , __a ) UpperCAmelCase__ = model_class(__a ) UpperCAmelCase__ = len(model(__a ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__a , saved_model=__a ) UpperCAmelCase__ = os.path.join(__a , 'saved_model' , '1' ) UpperCAmelCase__ = tf.keras.models.load_model(__a ) UpperCAmelCase__ = model(__a ) if self.is_encoder_decoder: UpperCAmelCase__ = outputs['encoder_hidden_states'] UpperCAmelCase__ = outputs['encoder_attentions'] else: UpperCAmelCase__ = outputs['hidden_states'] UpperCAmelCase__ = outputs['attentions'] self.assertEqual(len(__a ) , __a ) UpperCAmelCase__ = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(__a ) , __a ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def UpperCamelCase__ (self ) -> Any: """simple docstring""" UpperCAmelCase__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' ) self.assertIsNotNone(__a ) def UpperCamelCase__ (self ) -> List[str]: """simple docstring""" UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ = True UpperCAmelCase__ = getattr(self.model_tester , 
'decoder_seq_length' , self.model_tester.seq_length ) UpperCAmelCase__ = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length ) UpperCAmelCase__ = getattr(self.model_tester , 'key_length' , __a ) UpperCAmelCase__ = getattr(self.model_tester , 'key_length' , __a ) def check_decoder_attentions_output(__a ): UpperCAmelCase__ = len(__a ) self.assertEqual(out_len % 2 , 0 ) UpperCAmelCase__ = outputs.decoder_attentions self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(__a ): UpperCAmelCase__ = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: UpperCAmelCase__ = True UpperCAmelCase__ = False UpperCAmelCase__ = model_class(__a ) UpperCAmelCase__ = model(self._prepare_for_class(__a , __a ) ) UpperCAmelCase__ = len(__a ) self.assertEqual(config.output_hidden_states , __a ) check_encoder_attentions_output(__a ) if self.is_encoder_decoder: UpperCAmelCase__ = model_class(__a ) UpperCAmelCase__ = model(self._prepare_for_class(__a , __a ) ) self.assertEqual(config.output_hidden_states , __a ) check_decoder_attentions_output(__a ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] UpperCAmelCase__ = True UpperCAmelCase__ = model_class(__a ) UpperCAmelCase__ = model(self._prepare_for_class(__a , __a ) ) self.assertEqual(config.output_hidden_states , __a ) check_encoder_attentions_output(__a ) # Check attention is always last and order is fine UpperCAmelCase__ = True UpperCAmelCase__ = True UpperCAmelCase__ = model_class(__a ) UpperCAmelCase__ = model(self._prepare_for_class(__a , __a ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__a ) ) self.assertEqual(model.config.output_hidden_states , __a ) check_encoder_attentions_output(__a ) @require_tf class lowercase ( unittest.TestCase ): '''simple docstring''' @slow def UpperCamelCase__ (self ) -> int: """simple docstring""" UpperCAmelCase__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' ) UpperCAmelCase__ = tf.constant([[0, 1, 2, 3, 4, 5]] ) UpperCAmelCase__ = model(__a )[0] UpperCAmelCase__ = [1, 6, 768] self.assertEqual(output.shape , __a ) UpperCAmelCase__ = tf.constant( [ [ [-0.03_47_54_93, -0.4_68_60_34, -0.30_63_88_32], [0.22_63_72_48, -0.26_98_86_46, -0.7_42_34_24], [0.10_32_48_68, -0.45_01_35_08, -0.58_28_07_84], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1E-4 )
335
1
class SubArray:
    def __init__(self, arr):
        # The input is a single comma-separated string of numbers.
        self.array = arr.split(",")

    def solve_sub_array(self):
        # Kadane-style dynamic programming: sum_value[i] holds the best sum of a
        # subarray ending at index i, rear[i] the best sum seen anywhere up to i.
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re))
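For reference, the same maximum-subarray result can be cross-checked with a standalone Kadane's-algorithm function (a minimal sketch; the function name is illustrative):

def max_subarray_sum(nums):
    # best_ending_here: best sum of a subarray ending at the current element;
    # best_overall: best sum seen so far anywhere in the list.
    best_ending_here = best_overall = nums[0]
    for x in nums[1:]:
        best_ending_here = max(x, best_ending_here + x)
        best_overall = max(best_overall, best_ending_here)
    return best_overall


assert max_subarray_sum([1, -2, 3, 4, -1]) == 7  # the subarray [3, 4]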
20
import json import os from dataclasses import dataclass from functools import partial from typing import Callable import flax.linen as nn import jax import jax.numpy as jnp import joblib import optax import wandb from flax import jax_utils, struct, traverse_util from flax.serialization import from_bytes, to_bytes from flax.training import train_state from flax.training.common_utils import shard from tqdm.auto import tqdm from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule class __snake_case ( lowerCAmelCase ): _a : BigBirdConfig _a : jnp.dtype= jnp.floataa _a : bool= True def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' super().setup() lowercase : List[str] = nn.Dense(5 ,dtype=self.dtype ) def __call__( self ,*snake_case ,**snake_case ): '''simple docstring''' lowercase : int = super().__call__(*snake_case ,**snake_case ) lowercase : Any = self.cls(outputs[2] ) return outputs[:2] + (cls_out,) class __snake_case ( lowerCAmelCase ): _a : List[Any]= FlaxBigBirdForNaturalQuestionsModule def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[str]: def cross_entropy(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ): lowercase : int = logits.shape[-1] lowercase : Dict = (labels[..., None] == jnp.arange(SCREAMING_SNAKE_CASE__ )[None]).astype("""f4""" ) lowercase : Any = jax.nn.log_softmax(SCREAMING_SNAKE_CASE__ , axis=-1 ) lowercase : Optional[Any] = -jnp.sum(labels * logits , axis=-1 ) if reduction is not None: lowercase : Any = reduction(SCREAMING_SNAKE_CASE__ ) return loss lowercase : Optional[Any] = partial(SCREAMING_SNAKE_CASE__ , reduction=jnp.mean ) lowercase : Optional[int] = cross_entropy(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase : Dict = cross_entropy(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase : int = cross_entropy(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return (start_loss + end_loss + pooled_loss) / 3 @dataclass class __snake_case : _a : str= "google/bigbird-roberta-base" _a : int= 3000 _a : int= 1_0500 _a : int= 128 _a : int= 3 _a : int= 1 _a : int= 5 # tx_args _a : float= 3E-5 _a : float= 0.0 _a : int= 2_0000 _a : float= 0.00_95 _a : str= "bigbird-roberta-natural-questions" _a : str= "training-expt" _a : str= "data/nq-training.jsonl" _a : str= "data/nq-validation.jsonl" def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' os.makedirs(self.base_dir ,exist_ok=snake_case ) lowercase : Optional[int] = os.path.join(self.base_dir ,self.save_dir ) lowercase : Optional[int] = self.batch_size_per_device * jax.device_count() @dataclass class __snake_case : _a : int _a : int= 4096 # no dynamic padding on TPUs def __call__( self ,snake_case ): '''simple docstring''' lowercase : int = self.collate_fn(snake_case ) lowercase : Union[str, Any] = jax.tree_util.tree_map(snake_case ,snake_case ) return batch def _SCREAMING_SNAKE_CASE ( self ,snake_case ): '''simple docstring''' lowercase , lowercase : Union[str, Any] = self.fetch_inputs(features["""input_ids"""] ) lowercase : Tuple = { """input_ids""": jnp.array(snake_case ,dtype=jnp.intaa ), """attention_mask""": jnp.array(snake_case ,dtype=jnp.intaa ), """start_labels""": jnp.array(features["""start_token"""] ,dtype=jnp.intaa ), """end_labels""": jnp.array(features["""end_token"""] ,dtype=jnp.intaa ), """pooled_labels""": 
jnp.array(features["""category"""] ,dtype=jnp.intaa ), } return batch def _SCREAMING_SNAKE_CASE ( self ,snake_case ): '''simple docstring''' lowercase : Tuple = [self._fetch_inputs(snake_case ) for ids in input_ids] return zip(*snake_case ) def _SCREAMING_SNAKE_CASE ( self ,snake_case ): '''simple docstring''' lowercase : Union[str, Any] = [1 for _ in range(len(snake_case ) )] while len(snake_case ) < self.max_length: input_ids.append(self.pad_id ) attention_mask.append(0 ) return input_ids, attention_mask def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ) -> Any: if seed is not None: lowercase : Optional[int] = dataset.shuffle(seed=SCREAMING_SNAKE_CASE__ ) for i in range(len(SCREAMING_SNAKE_CASE__ ) // batch_size ): lowercase : Optional[Any] = dataset[i * batch_size : (i + 1) * batch_size] yield dict(SCREAMING_SNAKE_CASE__ ) @partial(jax.pmap , axis_name="""batch""" ) def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> List[Any]: def loss_fn(SCREAMING_SNAKE_CASE__ ): lowercase : List[str] = model_inputs.pop("""start_labels""" ) lowercase : Optional[int] = model_inputs.pop("""end_labels""" ) lowercase : str = model_inputs.pop("""pooled_labels""" ) lowercase : Union[str, Any] = state.apply_fn(**SCREAMING_SNAKE_CASE__ , params=SCREAMING_SNAKE_CASE__ , dropout_rng=SCREAMING_SNAKE_CASE__ , train=SCREAMING_SNAKE_CASE__ ) lowercase , lowercase , lowercase : List[str] = outputs return state.loss_fn( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ) lowercase , lowercase : int = jax.random.split(SCREAMING_SNAKE_CASE__ ) lowercase : Dict = jax.value_and_grad(SCREAMING_SNAKE_CASE__ ) lowercase , lowercase : Union[str, Any] = grad_fn(state.params ) lowercase : List[Any] = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" ) lowercase : List[Any] = jax.lax.pmean(SCREAMING_SNAKE_CASE__ , """batch""" ) lowercase : str = state.apply_gradients(grads=SCREAMING_SNAKE_CASE__ ) return state, metrics, new_drp_rng @partial(jax.pmap , axis_name="""batch""" ) def _snake_case( SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> List[str]: lowercase : int = model_inputs.pop("""start_labels""" ) lowercase : Dict = model_inputs.pop("""end_labels""" ) lowercase : Optional[Any] = model_inputs.pop("""pooled_labels""" ) lowercase : Optional[int] = state.apply_fn(**SCREAMING_SNAKE_CASE__ , params=state.params , train=SCREAMING_SNAKE_CASE__ ) lowercase , lowercase , lowercase : List[Any] = outputs lowercase : Dict = state.loss_fn(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase : str = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" ) return metrics class __snake_case ( train_state.TrainState ): _a : Callable= struct.field(pytree_node=lowerCAmelCase ) @dataclass class __snake_case : _a : Args _a : Callable _a : Callable _a : Callable _a : Callable _a : wandb _a : Callable= None def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case=None ): '''simple docstring''' lowercase : Tuple = model.params lowercase : Any = TrainState.create( apply_fn=model.__call__ ,params=snake_case ,tx=snake_case ,loss_fn=snake_case ,) if ckpt_dir is not None: lowercase , lowercase , lowercase , lowercase , lowercase : Tuple = restore_checkpoint(snake_case ,snake_case ) lowercase : List[str] = { """lr""": 
args.lr, """init_lr""": args.init_lr, """warmup_steps""": args.warmup_steps, """num_train_steps""": num_train_steps, """weight_decay""": args.weight_decay, } lowercase , lowercase : Tuple = build_tx(**snake_case ) lowercase : str = train_state.TrainState( step=snake_case ,apply_fn=model.__call__ ,params=snake_case ,tx=snake_case ,opt_state=snake_case ,) lowercase : Any = args lowercase : Optional[Any] = data_collator lowercase : List[str] = lr lowercase : str = params lowercase : Tuple = jax_utils.replicate(snake_case ) return state def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ): '''simple docstring''' lowercase : Dict = self.args lowercase : Optional[Any] = len(snake_case ) // args.batch_size lowercase : int = jax.random.PRNGKey(0 ) lowercase : List[str] = jax.random.split(snake_case ,jax.device_count() ) for epoch in range(args.max_epochs ): lowercase : List[Any] = jnp.array(0 ,dtype=jnp.floataa ) lowercase : List[str] = get_batched_dataset(snake_case ,args.batch_size ,seed=snake_case ) lowercase : int = 0 for batch in tqdm(snake_case ,total=snake_case ,desc=f"Running EPOCH-{epoch}" ): lowercase : Dict = self.data_collator(snake_case ) lowercase , lowercase , lowercase : Optional[int] = self.train_step_fn(snake_case ,snake_case ,**snake_case ) running_loss += jax_utils.unreplicate(metrics["""loss"""] ) i += 1 if i % args.logging_steps == 0: lowercase : Optional[Any] = jax_utils.unreplicate(state.step ) lowercase : List[str] = running_loss.item() / i lowercase : List[str] = self.scheduler_fn(state_step - 1 ) lowercase : int = self.evaluate(snake_case ,snake_case ) lowercase : Tuple = { """step""": state_step.item(), """eval_loss""": eval_loss.item(), """tr_loss""": tr_loss, """lr""": lr.item(), } tqdm.write(str(snake_case ) ) self.logger.log(snake_case ,commit=snake_case ) if i % args.save_steps == 0: self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}" ,state=snake_case ) def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ): '''simple docstring''' lowercase : List[str] = get_batched_dataset(snake_case ,self.args.batch_size ) lowercase : Any = len(snake_case ) // self.args.batch_size lowercase : List[Any] = jnp.array(0 ,dtype=jnp.floataa ) lowercase : Optional[int] = 0 for batch in tqdm(snake_case ,total=snake_case ,desc="""Evaluating ... """ ): lowercase : Tuple = self.data_collator(snake_case ) lowercase : Optional[int] = self.val_step_fn(snake_case ,**snake_case ) running_loss += jax_utils.unreplicate(metrics["""loss"""] ) i += 1 return running_loss / i def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ): '''simple docstring''' lowercase : str = jax_utils.unreplicate(snake_case ) print(f"SAVING CHECKPOINT IN {save_dir}" ,end=""" ... """ ) self.model_save_fn(snake_case ,params=state.params ) with open(os.path.join(snake_case ,"""opt_state.msgpack""" ) ,"""wb""" ) as f: f.write(to_bytes(state.opt_state ) ) joblib.dump(self.args ,os.path.join(snake_case ,"""args.joblib""" ) ) joblib.dump(self.data_collator ,os.path.join(snake_case ,"""data_collator.joblib""" ) ) with open(os.path.join(snake_case ,"""training_state.json""" ) ,"""w""" ) as f: json.dump({"""step""": state.step.item()} ,snake_case ) print("""DONE""" ) def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]: print(f"RESTORING CHECKPOINT FROM {save_dir}" , end=""" ... 
""" ) with open(os.path.join(SCREAMING_SNAKE_CASE__ , """flax_model.msgpack""" ) , """rb""" ) as f: lowercase : str = from_bytes(state.params , f.read() ) with open(os.path.join(SCREAMING_SNAKE_CASE__ , """opt_state.msgpack""" ) , """rb""" ) as f: lowercase : Optional[int] = from_bytes(state.opt_state , f.read() ) lowercase : Optional[Any] = joblib.load(os.path.join(SCREAMING_SNAKE_CASE__ , """args.joblib""" ) ) lowercase : int = joblib.load(os.path.join(SCREAMING_SNAKE_CASE__ , """data_collator.joblib""" ) ) with open(os.path.join(SCREAMING_SNAKE_CASE__ , """training_state.json""" ) , """r""" ) as f: lowercase : Tuple = json.load(SCREAMING_SNAKE_CASE__ ) lowercase : Tuple = training_state["""step"""] print("""DONE""" ) return params, opt_state, step, args, data_collator def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[Any]: lowercase : List[str] = num_train_steps - warmup_steps lowercase : Dict = optax.linear_schedule(init_value=SCREAMING_SNAKE_CASE__ , end_value=SCREAMING_SNAKE_CASE__ , transition_steps=SCREAMING_SNAKE_CASE__ ) lowercase : List[str] = optax.linear_schedule(init_value=SCREAMING_SNAKE_CASE__ , end_value=1e-7 , transition_steps=SCREAMING_SNAKE_CASE__ ) lowercase : Tuple = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] ) return lr def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[Any]: def weight_decay_mask(SCREAMING_SNAKE_CASE__ ): lowercase : List[Any] = traverse_util.flatten_dict(SCREAMING_SNAKE_CASE__ ) lowercase : List[Any] = {k: (v[-1] != """bias""" and v[-2:] != ("""LayerNorm""", """scale""")) for k, v in params.items()} return traverse_util.unflatten_dict(SCREAMING_SNAKE_CASE__ ) lowercase : List[str] = scheduler_fn(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase : List[str] = optax.adamw(learning_rate=SCREAMING_SNAKE_CASE__ , weight_decay=SCREAMING_SNAKE_CASE__ , mask=SCREAMING_SNAKE_CASE__ ) return tx, lr
20
1
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
184
def solution(n: int = 1000) -> int:
    """Return the largest product a * b * c of a Pythagorean triplet with a + b + c == n."""
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product


if __name__ == "__main__":
    print(f"{solution() = }")
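A brute-force cross-check of the algebraic solution above (a sketch; the function name is illustrative), enumerating every (a, b) pair directly:

def solution_brute_force(n: int = 1000) -> int:
    # O(n^2) enumeration: slower than the closed-form loop above, but obviously correct.
    best = -1
    for a in range(1, n):
        for b in range(a, n - a):
            c = n - a - b
            if a * a + b * b == c * c:
                best = max(best, a * b * c)
    return best


assert solution_brute_force() == 31875000  # a=200, b=375, c=425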
184
1
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any):
        # Insert at the head of the list.
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1, node_data_2):
        # Swap the data fields of the first nodes carrying each value.
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
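A small assertion-based sanity check for swap_nodes, assuming the names from the demo above:

ll = LinkedList()
for i in range(5, 0, -1):
    ll.push(i)            # push prepends, so the list is now 1 -> 2 -> 3 -> 4 -> 5
ll.swap_nodes(1, 4)       # swaps the nodes' data fields, not the nodes themselves

values = []
node = ll.head
while node is not None:
    values.append(node.data)
    node = node.next
assert values == [4, 2, 3, 1, 5]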
214
import warnings

from ...utils import logging
from .image_processing_dpt import DPTImageProcessor


logger = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
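New code should go straight to the replacement class named in the deprecation warning; a minimal migration sketch (the "Intel/dpt-large" checkpoint is used here only as an example):

from transformers import DPTImageProcessor

# Equivalent to the deprecated DPTFeatureExtractor, without the FutureWarning.
image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")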
214
1
"""simple docstring""" import logging import random import ray from transformers import RagConfig, RagRetriever, RagTokenizer from transformers.models.rag.retrieval_rag import CustomHFIndex __snake_case = logging.getLogger(__name__) class _lowerCAmelCase : def __init__( self ) -> Union[str, Any]: '''simple docstring''' snake_case : Union[str, Any] = False def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple: '''simple docstring''' if not self.initialized: snake_case : Tuple = RagRetriever( _SCREAMING_SNAKE_CASE , question_encoder_tokenizer=_SCREAMING_SNAKE_CASE , generator_tokenizer=_SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE , init_retrieval=_SCREAMING_SNAKE_CASE , ) snake_case : Any = True def lowerCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' self.retriever.index.init_index() def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> int: '''simple docstring''' snake_case ,snake_case : Optional[Any] = self.retriever._main_retrieve(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return doc_ids, retrieved_doc_embeds class _lowerCAmelCase ( lowerCAmelCase__ ): def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None ) -> Dict: '''simple docstring''' if index is not None and index.is_initialized() and len(_SCREAMING_SNAKE_CASE ) > 0: raise ValueError( "When using Ray for distributed fine-tuning, " "you'll need to provide the paths instead, " "as the dataset and the index are loaded " "separately. More info in examples/rag/use_own_knowledge_dataset.py " ) super().__init__( _SCREAMING_SNAKE_CASE , question_encoder_tokenizer=_SCREAMING_SNAKE_CASE , generator_tokenizer=_SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE , init_retrieval=_SCREAMING_SNAKE_CASE , ) snake_case : int = retrieval_workers if len(self.retrieval_workers ) > 0: ray.get( [ worker.create_rag_retriever.remote(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for worker in self.retrieval_workers ] ) def lowerCamelCase ( self ) -> Optional[Any]: '''simple docstring''' logger.info("initializing retrieval" ) if len(self.retrieval_workers ) > 0: ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] ) else: # Non-distributed training. Load index into this same process. self.index.init_index() def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Any: '''simple docstring''' if len(self.retrieval_workers ) > 0: # Select a random retrieval actor. 
snake_case : Any = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )] snake_case ,snake_case : str = ray.get(random_worker.retrieve.remote(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) else: snake_case ,snake_case : Optional[Any] = self._main_retrieve(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(_SCREAMING_SNAKE_CASE ) @classmethod def lowerCamelCase ( cls , UpperCamelCase__ , UpperCamelCase__=None , **UpperCamelCase__ ) -> str: '''simple docstring''' return super(_SCREAMING_SNAKE_CASE , cls ).get_tokenizers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @classmethod def lowerCamelCase ( cls , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , **UpperCamelCase__ ) -> Union[str, Any]: '''simple docstring''' snake_case : Dict = kwargs.pop("config" , _SCREAMING_SNAKE_CASE ) or RagConfig.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) snake_case : List[str] = RagTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE , config=_SCREAMING_SNAKE_CASE ) snake_case : List[str] = rag_tokenizer.question_encoder snake_case : str = rag_tokenizer.generator if indexed_dataset is not None: snake_case : Any = "custom" snake_case : Union[str, Any] = CustomHFIndex(config.retrieval_vector_size , _SCREAMING_SNAKE_CASE ) else: snake_case : str = cls._build_index(_SCREAMING_SNAKE_CASE ) return cls( _SCREAMING_SNAKE_CASE , question_encoder_tokenizer=_SCREAMING_SNAKE_CASE , generator_tokenizer=_SCREAMING_SNAKE_CASE , retrieval_workers=_SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE , )
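The retriever above parks RagRetriever instances inside Ray actors and gathers results with ray.get. The underlying actor pattern, reduced to a self-contained toy (names are illustrative):

import ray

ray.init()


@ray.remote
class EchoWorker:
    # Stands in for the retrieval workers used by the distributed retriever above.
    def work(self, x):
        return x * 2


workers = [EchoWorker.remote() for _ in range(4)]
results = ray.get([w.work.remote(i) for i, w in enumerate(workers)])
print(results)  # [0, 2, 4, 6]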
355
"""simple docstring""" import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow __snake_case = logging.getLogger() @unittest.skip('''Temporarily disable the doc tests.''' ) @require_torch @require_tf @slow class _lowerCAmelCase ( unittest.TestCase ): def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = True , ) -> Tuple: '''simple docstring''' snake_case : List[Any] = [file for file in os.listdir(UpperCamelCase__ ) if os.path.isfile(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) )] if identifier is not None: snake_case : Optional[Any] = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(UpperCamelCase__ , UpperCamelCase__ ): for n_ in n_identifier: snake_case : str = [file for file in files if n_ not in file] else: snake_case : str = [file for file in files if n_identifier not in file] snake_case : Tuple = ignore_files or [] ignore_files.append("__init__.py" ) snake_case : Optional[Any] = [file for file in files if file not in ignore_files] for file in files: # Open all files print("Testing" , UpperCamelCase__ ) if only_modules: snake_case : str = file.split("." )[0] try: snake_case : Optional[int] = getattr(UpperCamelCase__ , UpperCamelCase__ ) snake_case : str = doctest.DocTestSuite(UpperCamelCase__ ) snake_case : Optional[Any] = unittest.TextTestRunner().run(UpperCamelCase__ ) self.assertIs(len(result.failures ) , 0 ) except AttributeError: logger.info(F'{module_identifier} is not a module.' ) else: snake_case : Tuple = doctest.testfile(str(".." / directory / file ) , optionflags=doctest.ELLIPSIS ) self.assertIs(result.failed , 0 ) def lowerCamelCase ( self ) -> str: '''simple docstring''' snake_case : Tuple = Path("src/transformers" ) snake_case : List[Any] = "modeling" snake_case : Dict = [ "modeling_ctrl.py", "modeling_tf_ctrl.py", ] self.analyze_directory(UpperCamelCase__ , identifier=UpperCamelCase__ , ignore_files=UpperCamelCase__ ) def lowerCamelCase ( self ) -> Dict: '''simple docstring''' snake_case : Optional[Any] = Path("src/transformers" ) snake_case : Optional[Any] = "tokenization" self.analyze_directory(UpperCamelCase__ , identifier=UpperCamelCase__ ) def lowerCamelCase ( self ) -> List[str]: '''simple docstring''' snake_case : Union[str, Any] = Path("src/transformers" ) snake_case : Optional[Any] = "configuration" self.analyze_directory(UpperCamelCase__ , identifier=UpperCamelCase__ ) def lowerCamelCase ( self ) -> Optional[Any]: '''simple docstring''' snake_case : List[str] = Path("src/transformers" ) snake_case : List[str] = ["configuration", "modeling", "tokenization"] self.analyze_directory(UpperCamelCase__ , n_identifier=UpperCamelCase__ ) def lowerCamelCase ( self ) -> List[Any]: '''simple docstring''' snake_case : Any = Path("docs/source" ) snake_case : Tuple = ["favicon.ico"] self.analyze_directory(UpperCamelCase__ , ignore_files=UpperCamelCase__ , only_modules=UpperCamelCase__ )
112
0
'''simple docstring''' import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class UpperCAmelCase__ ( lowercase__ ): """simple docstring""" __UpperCAmelCase : List[Any] = (IPNDMScheduler,) __UpperCAmelCase : Union[str, Any] = (('''num_inference_steps''', 50),) def __lowercase ( self : Any ,**_a : Union[str, Any] ): '''simple docstring''' _a : int = {'num_train_timesteps': 1000} config.update(**_a ) return config def __lowercase ( self : Dict ,_a : Dict=0 ,**_a : List[str] ): '''simple docstring''' _a : int = dict(self.forward_default_kwargs ) _a : Optional[Any] = kwargs.pop('num_inference_steps' ,_a ) _a : List[str] = self.dummy_sample _a : Any = 0.1 * sample _a : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: _a : Tuple = self.get_scheduler_config(**_a ) _a : Dict = scheduler_class(**_a ) scheduler.set_timesteps(_a ) # copy over dummy past residuals _a : Tuple = dummy_past_residuals[:] if time_step is None: _a : Tuple = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_a ) _a : int = scheduler_class.from_pretrained(_a ) new_scheduler.set_timesteps(_a ) # copy over dummy past residuals _a : Dict = dummy_past_residuals[:] _a : Optional[int] = scheduler.step(_a ,_a ,_a ,**_a ).prev_sample _a : str = new_scheduler.step(_a ,_a ,_a ,**_a ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" _a : Union[str, Any] = scheduler.step(_a ,_a ,_a ,**_a ).prev_sample _a : List[Any] = new_scheduler.step(_a ,_a ,_a ,**_a ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def __lowercase ( self : Optional[Any] ): '''simple docstring''' pass def __lowercase ( self : List[Any] ,_a : Tuple=0 ,**_a : Dict ): '''simple docstring''' _a : Optional[Any] = dict(self.forward_default_kwargs ) _a : str = kwargs.pop('num_inference_steps' ,_a ) _a : Optional[Any] = self.dummy_sample _a : List[Any] = 0.1 * sample _a : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: _a : Optional[Any] = self.get_scheduler_config() _a : Optional[Any] = scheduler_class(**_a ) scheduler.set_timesteps(_a ) # copy over dummy past residuals (must be after setting timesteps) _a : List[str] = dummy_past_residuals[:] if time_step is None: _a : Dict = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_a ) _a : Dict = scheduler_class.from_pretrained(_a ) # copy over dummy past residuals new_scheduler.set_timesteps(_a ) # copy over dummy past residual (must be after setting timesteps) _a : int = dummy_past_residuals[:] _a : int = scheduler.step(_a ,_a ,_a ,**_a ).prev_sample _a : Optional[int] = new_scheduler.step(_a ,_a ,_a ,**_a ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" _a : Optional[Any] = scheduler.step(_a ,_a ,_a ,**_a ).prev_sample _a : Optional[Any] = new_scheduler.step(_a ,_a ,_a ,**_a ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def __lowercase ( self : Dict ,**_a : List[Any] ): '''simple docstring''' _a : List[Any] = self.scheduler_classes[0] _a : List[str] = self.get_scheduler_config(**_a ) _a : Union[str, Any] = 
scheduler_class(**_a ) _a : Optional[Any] = 10 _a : int = self.dummy_model() _a : Optional[int] = self.dummy_sample_deter scheduler.set_timesteps(_a ) for i, t in enumerate(scheduler.timesteps ): _a : Dict = model(_a ,_a ) _a : Optional[int] = scheduler.step(_a ,_a ,_a ).prev_sample for i, t in enumerate(scheduler.timesteps ): _a : Any = model(_a ,_a ) _a : str = scheduler.step(_a ,_a ,_a ).prev_sample return sample def __lowercase ( self : Optional[Any] ): '''simple docstring''' _a : str = dict(self.forward_default_kwargs ) _a : List[Any] = kwargs.pop('num_inference_steps' ,_a ) for scheduler_class in self.scheduler_classes: _a : str = self.get_scheduler_config() _a : int = scheduler_class(**_a ) _a : Any = self.dummy_sample _a : Any = 0.1 * sample if num_inference_steps is not None and hasattr(_a ,'set_timesteps' ): scheduler.set_timesteps(_a ) elif num_inference_steps is not None and not hasattr(_a ,'set_timesteps' ): _a : Union[str, Any] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) _a : int = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] _a : List[str] = dummy_past_residuals[:] _a : str = scheduler.timesteps[5] _a : Dict = scheduler.timesteps[6] _a : str = scheduler.step(_a ,_a ,_a ,**_a ).prev_sample _a : Optional[int] = scheduler.step(_a ,_a ,_a ,**_a ).prev_sample self.assertEqual(output_a.shape ,sample.shape ) self.assertEqual(output_a.shape ,output_a.shape ) _a : Any = scheduler.step(_a ,_a ,_a ,**_a ).prev_sample _a : int = scheduler.step(_a ,_a ,_a ,**_a ).prev_sample self.assertEqual(output_a.shape ,sample.shape ) self.assertEqual(output_a.shape ,output_a.shape ) def __lowercase ( self : int ): '''simple docstring''' for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=_a ,time_step=_a ) def __lowercase ( self : Optional[int] ): '''simple docstring''' for t, num_inference_steps in zip([1, 5, 10] ,[10, 50, 100] ): self.check_over_forward(num_inference_steps=_a ,time_step=_a ) def __lowercase ( self : Dict ): '''simple docstring''' _a : Any = self.full_loop() _a : Any = torch.mean(torch.abs(_a ) ) assert abs(result_mean.item() - 254_0529 ) < 10
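Outside the test harness, the denoising loop exercised above looks roughly like this (a sketch assuming the diffusers API used by the test; the random tensors stand in for a real model and sample):

import torch
from diffusers import IPNDMScheduler

scheduler = IPNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(num_inference_steps=10)

sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    model_output = torch.randn(1, 3, 8, 8)  # placeholder for a denoising model's prediction
    sample = scheduler.step(model_output, t, sample).prev_sample
print(sample.shape)  # unchanged: torch.Size([1, 3, 8, 8])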
271
'''simple docstring''' import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import MaskaFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel if is_vision_available(): from transformers import MaskaFormerImageProcessor if is_vision_available(): from PIL import Image class UpperCAmelCase__ : """simple docstring""" def __init__( self : int ,_a : Any ,_a : Optional[int]=2 ,_a : Optional[Any]=True ,_a : Dict=False ,_a : Dict=10 ,_a : Any=3 ,_a : str=32 * 8 ,_a : Optional[int]=32 * 8 ,_a : int=4 ,_a : str=64 ,): '''simple docstring''' _a : Dict = parent _a : Union[str, Any] = batch_size _a : Tuple = is_training _a : List[str] = use_auxiliary_loss _a : Optional[Any] = num_queries _a : str = num_channels _a : List[str] = min_size _a : int = max_size _a : Optional[int] = num_labels _a : List[str] = hidden_dim _a : int = hidden_dim def __lowercase ( self : Union[str, Any] ): '''simple docstring''' _a : Tuple = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( _a ) _a : Optional[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=_a ) _a : Union[str, Any] = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=_a ) > 0.5 ).float() _a : Tuple = (torch.rand((self.batch_size, self.num_labels) ,device=_a ) > 0.5).long() _a : Dict = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def __lowercase ( self : Union[str, Any] ): '''simple docstring''' _a : int = MaskaFormerConfig( hidden_size=self.hidden_dim ,) _a : str = self.num_queries _a : Union[str, Any] = self.num_labels _a : Tuple = [1, 1, 1, 1] _a : Dict = self.num_channels _a : str = 64 _a : Tuple = 128 _a : Optional[Any] = self.hidden_dim _a : Union[str, Any] = self.hidden_dim _a : List[Any] = self.hidden_dim return config def __lowercase ( self : Optional[Any] ): '''simple docstring''' _a, _a, _a, _a, _a : Optional[Any] = self.prepare_config_and_inputs() _a : str = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask} return config, inputs_dict def __lowercase ( self : List[str] ,_a : Optional[Any] ,_a : str ): '''simple docstring''' _a : str = output.encoder_hidden_states _a : Any = output.pixel_decoder_hidden_states _a : Optional[Any] = output.transformer_decoder_hidden_states self.parent.assertTrue(len(_a ) ,len(config.backbone_config.depths ) ) self.parent.assertTrue(len(_a ) ,len(config.backbone_config.depths ) ) self.parent.assertTrue(len(_a ) ,config.decoder_layers ) def __lowercase ( self : List[str] ,_a : str ,_a : List[Any] ,_a : Any ,_a : Union[str, Any]=False ): '''simple docstring''' with torch.no_grad(): _a : str = MaskaFormerModel(config=_a ) model.to(_a ) model.eval() _a : Any = model(pixel_values=_a ,pixel_mask=_a ) _a : Optional[Any] = model(_a ,output_hidden_states=_a ) self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.hidden_dim) ,) # let's ensure the other two hidden state exists 
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(_a ,_a ) def __lowercase ( self : Tuple ,_a : List[Any] ,_a : Union[str, Any] ,_a : Tuple ,_a : List[str] ,_a : Any ): '''simple docstring''' _a : int = MaskaFormerForUniversalSegmentation(config=_a ) model.to(_a ) model.eval() def comm_check_on_output(_a : Any ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): _a : Any = model(pixel_values=_a ,pixel_mask=_a ) _a : Optional[int] = model(_a ) comm_check_on_output(_a ) _a : List[str] = model( pixel_values=_a ,pixel_mask=_a ,mask_labels=_a ,class_labels=_a ) comm_check_on_output(_a ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) ) @require_torch class UpperCAmelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ): """simple docstring""" __UpperCAmelCase : Optional[int] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else () __UpperCAmelCase : Dict = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {} __UpperCAmelCase : Dict = False __UpperCAmelCase : Tuple = False __UpperCAmelCase : Dict = False __UpperCAmelCase : List[Any] = False def __lowercase ( self : Optional[int] ): '''simple docstring''' _a : Union[str, Any] = MaskaFormerModelTester(self ) _a : Dict = ConfigTester(self ,config_class=_a ,has_text_modality=_a ) def __lowercase ( self : Optional[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def __lowercase ( self : Optional[int] ): '''simple docstring''' _a, _a : List[str] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(_a ,**_a ,output_hidden_states=_a ) def __lowercase ( self : str ): '''simple docstring''' _a : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_a ) @unittest.skip(reason='Mask2Former does not use inputs_embeds' ) def __lowercase ( self : Any ): '''simple docstring''' pass @unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' ) def __lowercase ( self : str ): '''simple docstring''' pass @unittest.skip(reason='Mask2Former is not a generative model' ) def __lowercase ( self : List[Any] ): '''simple docstring''' pass @unittest.skip(reason='Mask2Former does not use token embeddings' ) def __lowercase ( self : Optional[Any] ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip( reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' ) def __lowercase ( self : Dict ): '''simple docstring''' pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def __lowercase ( self : List[Any] ): '''simple docstring''' pass def __lowercase ( self : int ): '''simple docstring''' _a, _a : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a : Union[str, Any] = model_class(_a ) _a : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _a : Optional[Any] = [*signature.parameters.keys()] _a : List[Any] = ['pixel_values'] self.assertListEqual(arg_names[:1] ,_a ) @slow def __lowercase ( self : List[str] ): '''simple docstring''' for model_name in ["facebook/mask2former-swin-small-coco-instance"]: _a : Dict = MaskaFormerModel.from_pretrained(_a ) self.assertIsNotNone(_a ) def __lowercase ( self : List[Any] ): '''simple docstring''' _a : int = (self.model_tester.min_size,) * 2 _a : Any = { 'pixel_values': torch.randn((2, 3, *size) ,device=_a ), 'mask_labels': torch.randn((2, 10, *size) ,device=_a ), 'class_labels': torch.zeros(2 ,10 ,device=_a ).long(), } _a : List[Any] = self.model_tester.get_config() _a : int = MaskaFormerForUniversalSegmentation(_a ).to(_a ) _a : str = model(**_a ) self.assertTrue(outputs.loss is not None ) def __lowercase ( self : List[str] ): '''simple docstring''' _a, _a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(_a ,**_a ,output_hidden_states=_a ) def __lowercase ( self : int ): '''simple docstring''' _a, _a : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a : Any = model_class(_a ).to(_a ) _a : Optional[int] = model(**_a ,output_attentions=_a ) self.assertTrue(outputs.attentions is not None ) def __lowercase ( self : Tuple ): '''simple docstring''' if not self.model_tester.is_training: return _a : List[str] = self.all_model_classes[1] _a, _a, _a, _a, _a : List[str] = self.model_tester.prepare_config_and_inputs() _a : Any = model_class(_a ) model.to(_a ) model.train() _a : Union[str, Any] = model(_a ,mask_labels=_a ,class_labels=_a ).loss loss.backward() def __lowercase ( self : int ): '''simple docstring''' _a : int = self.all_model_classes[1] _a, _a, _a, _a, _a : List[Any] = self.model_tester.prepare_config_and_inputs() _a : str = True _a : str = True _a : List[str] = model_class(_a ).to(_a ) model.train() _a : Optional[int] = model(_a ,mask_labels=_a ,class_labels=_a ) _a : Tuple = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _a : str = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() _a : Dict = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() _a : List[str] = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=_a ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) __lowerCAmelCase = 1e-4 def UpperCAmelCase_ (): """simple docstring""" _a : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_vision @slow class UpperCAmelCase__ ( unittest.TestCase ): """simple docstring""" @cached_property def __lowercase ( self : Union[str, Any] ): '''simple docstring''' return "facebook/mask2former-swin-small-coco-instance" @cached_property def __lowercase ( self : Any ): '''simple docstring''' return 
MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None def __lowercase ( self : Any ): '''simple docstring''' _a : List[str] = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_a ) _a : int = self.default_image_processor _a : Tuple = prepare_img() _a : Any = image_processor(_a ,return_tensors='pt' ).to(_a ) _a : Union[str, Any] = inputs['pixel_values'].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(_a ,(1, 3, 384, 384) ) with torch.no_grad(): _a : Optional[Any] = model(**_a ) _a : List[Any] = torch.tensor( [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(_a ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] ,_a ,atol=_a ) ) _a : str = torch.tensor( [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(_a ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,_a ,atol=_a ) ) _a : Any = torch.tensor( [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(_a ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,_a ,atol=_a ) ) def __lowercase ( self : Tuple ): '''simple docstring''' _a : List[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_a ).eval() _a : Optional[Any] = self.default_image_processor _a : List[Any] = prepare_img() _a : str = image_processor(_a ,return_tensors='pt' ).to(_a ) _a : Any = inputs['pixel_values'].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(_a ,(1, 3, 384, 384) ) with torch.no_grad(): _a : Optional[int] = model(**_a ) # masks_queries_logits _a : Dict = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape ,(1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ) _a : Dict = [ [-8.7839, -9.0056, -8.8121], [-7.4104, -7.0313, -6.5401], [-6.6105, -6.3427, -6.4675], ] _a : Optional[Any] = torch.tensor(_a ).to(_a ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,_a ,atol=_a ) ) # class_queries_logits _a : str = outputs.class_queries_logits self.assertEqual(class_queries_logits.shape ,(1, model.config.num_queries, model.config.num_labels + 1) ) _a : str = torch.tensor( [ [1.8324, -8.0835, -4.1922], [0.8450, -9.0050, -3.6053], [0.3045, -7.7293, -3.0275], ] ).to(_a ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,_a ,atol=_a ) ) def __lowercase ( self : Optional[Any] ): '''simple docstring''' _a : Any = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_a ).eval() _a : Tuple = self.default_image_processor _a : Tuple = image_processor( [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,return_tensors='pt' ,) _a : str = inputs['pixel_values'].to(_a ) _a : str = [el.to(_a ) for el in inputs['mask_labels']] _a : Dict = [el.to(_a ) for el in inputs['class_labels']] with torch.no_grad(): _a : List[str] = model(**_a ) self.assertTrue(outputs.loss is not None )
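The integration tests above boil down to the following inference recipe (a sketch using the public Mask2Former API and the checkpoint referenced in the tests):

import torch
from PIL import Image
from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerImageProcessor

checkpoint = "facebook/mask2former-swin-small-coco-instance"
processor = Mask2FormerImageProcessor.from_pretrained(checkpoint)
model = Mask2FormerForUniversalSegmentation.from_pretrained(checkpoint).eval()

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.class_queries_logits.shape, outputs.masks_queries_logits.shape)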
271
1
"""simple docstring""" import hashlib import unittest from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available from transformers.pipelines import DepthEstimationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class __SCREAMING_SNAKE_CASE : @staticmethod def _UpperCamelCase ( *snake_case : List[Any] , **snake_case : Tuple ): '''simple docstring''' pass def _lowerCAmelCase ( UpperCAmelCase__ : Image ) ->List[Any]: A__ : Tuple = hashlib.mda(image.tobytes() ) return m.hexdigest() @is_pipeline_test @require_vision @require_timm @require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): snake_case_ = MODEL_FOR_DEPTH_ESTIMATION_MAPPING def _UpperCamelCase ( self : Dict , snake_case : Union[str, Any] , snake_case : Optional[int] , snake_case : Union[str, Any] ): '''simple docstring''' A__ : Union[str, Any] = DepthEstimationPipeline(model=lowerCamelCase_ , image_processor=lowerCamelCase_ ) return depth_estimator, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def _UpperCamelCase ( self : Tuple , snake_case : Tuple , snake_case : str ): '''simple docstring''' A__ : Tuple = depth_estimator("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) self.assertEqual({"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )} , lowerCamelCase_ ) import datasets A__ : Union[str, Any] = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" ) A__ : List[str] = depth_estimator( [ Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ), """http://images.cocodataset.org/val2017/000000039769.jpg""", # RGBA dataset[0]["""file"""], # LA dataset[1]["""file"""], # L dataset[2]["""file"""], ] ) self.assertEqual( [ {"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )}, {"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )}, {"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )}, {"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )}, {"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )}, ] , lowerCamelCase_ , ) @require_tf @unittest.skip("""Depth estimation is not implemented in TF""" ) def _UpperCamelCase ( self : List[str] ): '''simple docstring''' pass @slow @require_torch def _UpperCamelCase ( self : int ): '''simple docstring''' A__ : Optional[int] = """Intel/dpt-large""" A__ : str = pipeline("""depth-estimation""" , model=lowerCamelCase_ ) A__ : Dict = depth_estimator("""http://images.cocodataset.org/val2017/000000039769.jpg""" ) A__ : Dict = hashimage(outputs["""depth"""] ) # This seems flaky. # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977") self.assertEqual(nested_simplify(outputs["""predicted_depth"""].max().item() ) , 29.304 ) self.assertEqual(nested_simplify(outputs["""predicted_depth"""].min().item() ) , 2.662 ) @require_torch def _UpperCamelCase ( self : List[str] ): '''simple docstring''' self.skipTest("""There is not hf-internal-testing tiny model for either GLPN nor DPT""" )
365
"""simple docstring""" import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class __SCREAMING_SNAKE_CASE : def __init__( self : List[str] , snake_case : Tuple , snake_case : List[str]=2 , snake_case : List[str]=8 , snake_case : List[Any]=True , snake_case : Optional[Any]=True , snake_case : List[Any]=True , snake_case : Dict=True , snake_case : Tuple=99 , snake_case : Dict=16 , snake_case : Dict=5 , snake_case : int=2 , snake_case : Any=36 , snake_case : str="gelu" , snake_case : Dict=0.0 , snake_case : List[Any]=0.0 , snake_case : int=512 , snake_case : List[Any]=16 , snake_case : Tuple=2 , snake_case : Any=0.02 , snake_case : Optional[Any]=3 , snake_case : List[Any]=4 , snake_case : str=None , ): '''simple docstring''' A__ : Union[str, Any] = parent A__ : Optional[Any] = batch_size A__ : Dict = seq_length A__ : str = is_training A__ : Tuple = use_input_mask A__ : Dict = use_token_type_ids A__ : Dict = use_labels A__ : int = vocab_size A__ : List[str] = hidden_size A__ : Union[str, Any] = num_hidden_layers A__ : int = num_attention_heads A__ : List[str] = intermediate_size A__ : int = hidden_act A__ : str = hidden_dropout_prob A__ : Tuple = attention_probs_dropout_prob A__ : Any = max_position_embeddings A__ : Optional[int] = type_vocab_size A__ : int = type_sequence_label_size A__ : Optional[Any] = initializer_range A__ : int = num_labels A__ : Optional[int] = num_choices A__ : Optional[int] = scope def _UpperCamelCase ( self : Optional[int] ): '''simple docstring''' A__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A__ : Any = None if self.use_input_mask: A__ : Any = random_attention_mask([self.batch_size, self.seq_length] ) A__ : Optional[int] = None if self.use_token_type_ids: A__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A__ : Dict = None A__ : List[str] = None A__ : Union[str, Any] = None if self.use_labels: A__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A__ : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A__ : Any = ids_tensor([self.batch_size] , self.num_choices ) A__ : Optional[int] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _UpperCamelCase ( self : List[str] ): '''simple docstring''' return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , ) def _UpperCamelCase ( self : Tuple ): '''simple docstring''' A__ : Any = self.get_config() A__ : List[str] = 300 return config def _UpperCamelCase ( self : Union[str, Any] 
    ):
        '''simple docstring'''
        (
            ( A__ ) ,
            ( A__ ) ,
            ( A__ ) ,
            ( A__ ) ,
            ( A__ ) ,
            ( A__ ) ,
            ( A__ ) ,
        ) : Tuple = self.prepare_config_and_inputs()

        A__ : List[str] = True
        A__ : List[str] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        A__ : int = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def _UpperCamelCase ( self : Any , snake_case : Any , snake_case : Tuple , snake_case : Any , snake_case : Optional[int] , snake_case : Optional[int] , snake_case : Optional[int] , snake_case : Dict ):
        '''simple docstring'''
        A__ : List[str] = MraModel(config=snake_case )
        model.to(snake_case )
        model.eval()
        A__ : Dict = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case )
        A__ : List[str] = model(snake_case , token_type_ids=snake_case )
        A__ : Union[str, Any] = model(snake_case )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def _UpperCamelCase ( self : Optional[Any] , snake_case : List[Any] , snake_case : Any , snake_case : Optional[Any] , snake_case : Union[str, Any] , snake_case : Tuple , snake_case : Dict , snake_case : str , snake_case : Dict , snake_case : str , ):
        '''simple docstring'''
        A__ : Dict = True
        A__ : Optional[Any] = MraModel(snake_case )
        model.to(snake_case )
        model.eval()
        A__ : Union[str, Any] = model(
            snake_case ,
            attention_mask=snake_case ,
            token_type_ids=snake_case ,
            encoder_hidden_states=snake_case ,
            encoder_attention_mask=snake_case ,
        )
        A__ : str = model(
            snake_case ,
            attention_mask=snake_case ,
            token_type_ids=snake_case ,
            encoder_hidden_states=snake_case ,
        )
        A__ : Optional[int] = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def _UpperCamelCase ( self : int , snake_case : Optional[Any] , snake_case : Optional[int] , snake_case : int , snake_case : str , snake_case : Union[str, Any] , snake_case : Dict , snake_case : List[str] ):
        '''simple docstring'''
        A__ : Union[str, Any] = MraForMaskedLM(config=snake_case )
        model.to(snake_case )
        model.eval()
        A__ : List[Any] = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def _UpperCamelCase ( self : Optional[Any] , snake_case : Dict , snake_case : Dict , snake_case : Dict , snake_case : List[str] , snake_case : List[str] , snake_case : Tuple , snake_case : Union[str, Any] ):
        '''simple docstring'''
        A__ : Dict = MraForQuestionAnswering(config=snake_case )
        model.to(snake_case )
        model.eval()
        A__ : str = model(
            snake_case ,
            attention_mask=snake_case ,
            token_type_ids=snake_case ,
            start_positions=snake_case ,
            end_positions=snake_case ,
        )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def _UpperCamelCase ( self : Tuple , snake_case : List[Any] , snake_case : Optional[Any] , snake_case : Optional[Any] , snake_case : List[str] , snake_case : Optional[int] , snake_case : List[str] , snake_case : Union[str, Any] ):
        '''simple docstring'''
        A__ : str = self.num_labels
        A__ : Optional[Any] = MraForSequenceClassification(snake_case )
        model.to(snake_case )
        model.eval()
        A__ : str = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def _UpperCamelCase ( self : Union[str, Any] , snake_case : Dict , snake_case : str , snake_case : List[Any] , snake_case : Any , snake_case : Dict , snake_case : Tuple , snake_case : Optional[Any] ):
        '''simple docstring'''
        A__ : str = self.num_labels
        A__ : Union[str, Any] = MraForTokenClassification(config=snake_case )
        model.to(snake_case )
        model.eval()
        A__ : str = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def _UpperCamelCase ( self : Tuple , snake_case : Optional[Any] , snake_case : Optional[int] , snake_case : int , snake_case : Optional[Any] , snake_case : List[str] , snake_case : Dict , snake_case : Optional[Any] ):
        '''simple docstring'''
        A__ : List[str] = self.num_choices
        A__ : str = MraForMultipleChoice(config=snake_case )
        model.to(snake_case )
        model.eval()
        A__ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        A__ : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        A__ : Tuple = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        A__ : str = model(
            snake_case ,
            attention_mask=snake_case ,
            token_type_ids=snake_case ,
            labels=snake_case ,
        )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def _UpperCamelCase ( self : List[Any] ):
        '''simple docstring'''
        A__ : List[str] = self.prepare_config_and_inputs()
        (
            ( A__ ) ,
            ( A__ ) ,
            ( A__ ) ,
            ( A__ ) ,
            ( A__ ) ,
            ( A__ ) ,
            ( A__ ) ,
        ) : Dict = config_and_inputs
        A__ : Optional[int] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict


@require_torch
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase ):
    snake_case_ = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    snake_case_ = False
    snake_case_ = False
    snake_case_ = False
    snake_case_ = False
    snake_case_ = ()

    def _UpperCamelCase ( self : int ):
        '''simple docstring'''
        A__ : Optional[Any] = MraModelTester(self )
        A__ : List[str] = ConfigTester(self , config_class=snake_case , hidden_size=37 )

    def _UpperCamelCase ( self : Tuple ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def _UpperCamelCase ( self : Tuple ):
        '''simple docstring'''
        A__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*snake_case )

    def _UpperCamelCase ( self : Tuple ):
        '''simple docstring'''
        A__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            A__ : List[str] = type
            self.model_tester.create_and_check_model(*snake_case )

    def _UpperCamelCase ( self : Union[str, Any] ):
        '''simple docstring'''
        A__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*snake_case )

    def _UpperCamelCase ( self : Union[str, Any] ):
        '''simple docstring'''
        A__ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*snake_case )

    def _UpperCamelCase ( self : Optional[int] ):
        '''simple docstring'''
        A__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*snake_case )

    def _UpperCamelCase ( self : int ):
        '''simple docstring'''
        A__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*snake_case )

    def _UpperCamelCase ( self : Union[str, Any] ):
        '''simple docstring'''
        A__ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*snake_case )

    @slow
    def _UpperCamelCase ( self : Any ):
        '''simple docstring'''
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A__ : str = MraModel.from_pretrained(snake_case )
            self.assertIsNotNone(snake_case )

    @unittest.skip(reason="""MRA does not output attentions""" )
    def _UpperCamelCase ( self : Tuple ):
        '''simple docstring'''
        return


@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    @slow
    def _UpperCamelCase ( self : Union[str, Any] ):
        '''simple docstring'''
        A__ : str = MraModel.from_pretrained("""uw-madison/mra-base-512-4""" )
        A__ : Any = torch.arange(256 ).unsqueeze(0 )

        with torch.no_grad():
            A__ : List[Any] = model(snake_case )[0]

        A__ : List[Any] = torch.Size((1, 256, 768) )
        self.assertEqual(output.shape , snake_case )

        A__ : int = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=1e-4 ) )

    @slow
    def _UpperCamelCase ( self : List[Any] ):
        '''simple docstring'''
        A__ : Union[str, Any] = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""" )
        A__ : Tuple = torch.arange(256 ).unsqueeze(0 )

        with torch.no_grad():
            A__ : List[Any] = model(snake_case )[0]

        A__ : Dict = 5_0265
        A__ : List[str] = torch.Size((1, 256, vocab_size) )
        self.assertEqual(output.shape , snake_case )

        A__ : List[Any] = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=1e-4 ) )

    @slow
    def _UpperCamelCase ( self : Dict ):
        '''simple docstring'''
        A__ : Any = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""" )
        A__ : List[Any] = torch.arange(4096 ).unsqueeze(0 )

        with torch.no_grad():
            A__ : List[Any] = model(snake_case )[0]

        A__ : Union[str, Any] = 5_0265
        A__ : Optional[Any] = torch.Size((1, 4096, vocab_size) )
        self.assertEqual(output.shape , snake_case )

        A__ : Optional[int] = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=1e-4 ) )
296
0
"""simple docstring""" import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class __a (enum.Enum): '''simple docstring''' _SCREAMING_SNAKE_CASE :Tuple = 0 _SCREAMING_SNAKE_CASE :Any = 1 _SCREAMING_SNAKE_CASE :str = 2 @add_end_docstrings(UpperCamelCase_) class __a (UpperCamelCase_): '''simple docstring''' _SCREAMING_SNAKE_CASE :str = """ In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision and denounces one of the men as a horse thief. Although his father initially slaps him for making such an accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop, begging for his blessing. <eod> </s> <eos> """ def __init__( self , *_a , **_a ) -> Dict: """simple docstring""" super().__init__(*_a , **_a ) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == """tf""" else MODEL_FOR_CAUSAL_LM_MAPPING ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. SCREAMING_SNAKE_CASE__ : Optional[Any] = None if self.model.config.prefix is not None: SCREAMING_SNAKE_CASE__ : Tuple = self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. SCREAMING_SNAKE_CASE__ : Optional[Any] = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self._sanitize_parameters(prefix=_a , **self._forward_params ) SCREAMING_SNAKE_CASE__ : Any = {**self._preprocess_params, **preprocess_params} SCREAMING_SNAKE_CASE__ : Optional[Any] = {**self._forward_params, **forward_params} def _a ( self , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , **_a , ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = {} if prefix is not None: SCREAMING_SNAKE_CASE__ : Tuple = prefix if prefix: SCREAMING_SNAKE_CASE__ : Tuple = self.tokenizer( _a , padding=_a , add_special_tokens=_a , return_tensors=self.framework ) SCREAMING_SNAKE_CASE__ : int = prefix_inputs["""input_ids"""].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( f'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected''' """ [None, 'hole']""" ) SCREAMING_SNAKE_CASE__ : Optional[Any] = handle_long_generation preprocess_params.update(_a ) SCREAMING_SNAKE_CASE__ : List[Any] = generate_kwargs SCREAMING_SNAKE_CASE__ : Any = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError("""`return_text` is mutually exclusive with `return_full_text`""" ) if return_tensors is not None: raise ValueError("""`return_full_text` is mutually exclusive with `return_tensors`""" ) SCREAMING_SNAKE_CASE__ : int = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError("""`return_text` is mutually exclusive with `return_tensors`""" ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = ReturnType.TENSORS if return_type is not None: SCREAMING_SNAKE_CASE__ : Optional[Any] = return_type if clean_up_tokenization_spaces is not None: SCREAMING_SNAKE_CASE__ : Optional[Any] = clean_up_tokenization_spaces if stop_sequence is not None: SCREAMING_SNAKE_CASE__ : Tuple = self.tokenizer.encode(_a , add_special_tokens=_a ) if len(_a ) > 1: warnings.warn( """Stopping on a multiple token sequence is not yet supported on transformers. 
The first token of""" """ the stop sequence will be used as the stop sequence string in the interim.""" ) SCREAMING_SNAKE_CASE__ : Optional[int] = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def _a ( self , *_a , **_a ) -> int: """simple docstring""" if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({"""add_space_before_punct_symbol""": True} ) return super()._parse_and_tokenize(*_a , **_a ) def __call__( self , _a , **_a ) -> Dict: """simple docstring""" return super().__call__(_a , **_a ) def _a ( self , _a , _a="" , _a=None , **_a ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = self.tokenizer( prefix + prompt_text , padding=_a , add_special_tokens=_a , return_tensors=self.framework ) SCREAMING_SNAKE_CASE__ : Optional[Any] = prompt_text if handle_long_generation == "hole": SCREAMING_SNAKE_CASE__ : Any = inputs["""input_ids"""].shape[-1] if "max_new_tokens" in generate_kwargs: SCREAMING_SNAKE_CASE__ : Optional[int] = generate_kwargs["""max_new_tokens"""] else: SCREAMING_SNAKE_CASE__ : Optional[Any] = generate_kwargs.get("""max_length""" , self.model.config.max_length ) - cur_len if new_tokens < 0: raise ValueError("""We cannot infer how many new tokens are expected""" ) if cur_len + new_tokens > self.tokenizer.model_max_length: SCREAMING_SNAKE_CASE__ : Tuple = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( """We cannot use `hole` to handle this generation the number of desired tokens exceeds the""" """ models max length""" ) SCREAMING_SNAKE_CASE__ : int = inputs["""input_ids"""][:, -keep_length:] if "attention_mask" in inputs: SCREAMING_SNAKE_CASE__ : Union[str, Any] = inputs["""attention_mask"""][:, -keep_length:] return inputs def _a ( self , _a , **_a ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = model_inputs["""input_ids"""] SCREAMING_SNAKE_CASE__ : str = model_inputs.get("""attention_mask""" , _a ) # Allow empty prompts if input_ids.shape[1] == 0: SCREAMING_SNAKE_CASE__ : Tuple = None SCREAMING_SNAKE_CASE__ : Any = None SCREAMING_SNAKE_CASE__ : Tuple = 1 else: SCREAMING_SNAKE_CASE__ : Optional[int] = input_ids.shape[0] SCREAMING_SNAKE_CASE__ : Optional[int] = model_inputs.pop("""prompt_text""" ) # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
SCREAMING_SNAKE_CASE__ : Any = generate_kwargs.pop("""prefix_length""" , 0 ) if prefix_length > 0: SCREAMING_SNAKE_CASE__ : Any = """max_new_tokens""" in generate_kwargs or ( """generation_config""" in generate_kwargs and generate_kwargs["""generation_config"""].max_new_tokens is not None ) if not has_max_new_tokens: SCREAMING_SNAKE_CASE__ : int = generate_kwargs.get("""max_length""" ) or self.model.config.max_length generate_kwargs["max_length"] += prefix_length SCREAMING_SNAKE_CASE__ : Optional[int] = """min_new_tokens""" in generate_kwargs or ( """generation_config""" in generate_kwargs and generate_kwargs["""generation_config"""].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model.generate(input_ids=_a , attention_mask=_a , **_a ) SCREAMING_SNAKE_CASE__ : Optional[Any] = generated_sequence.shape[0] if self.framework == "pt": SCREAMING_SNAKE_CASE__ : Dict = generated_sequence.reshape(_a , out_b // in_b , *generated_sequence.shape[1:] ) elif self.framework == "tf": SCREAMING_SNAKE_CASE__ : List[str] = tf.reshape(_a , (in_b, out_b // in_b, *generated_sequence.shape[1:]) ) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def _a ( self , _a , _a=ReturnType.FULL_TEXT , _a=True ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = model_outputs["""generated_sequence"""][0] SCREAMING_SNAKE_CASE__ : Union[str, Any] = model_outputs["""input_ids"""] SCREAMING_SNAKE_CASE__ : Optional[Any] = model_outputs["""prompt_text"""] SCREAMING_SNAKE_CASE__ : List[Any] = generated_sequence.numpy().tolist() SCREAMING_SNAKE_CASE__ : Any = [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"""generated_token_ids""": sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text SCREAMING_SNAKE_CASE__ : Optional[Any] = self.tokenizer.decode( _a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: SCREAMING_SNAKE_CASE__ : List[Any] = 0 else: SCREAMING_SNAKE_CASE__ : int = len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=_a , clean_up_tokenization_spaces=_a , ) ) if return_type == ReturnType.FULL_TEXT: SCREAMING_SNAKE_CASE__ : Dict = prompt_text + text[prompt_length:] else: SCREAMING_SNAKE_CASE__ : int = text[prompt_length:] SCREAMING_SNAKE_CASE__ : Tuple = {"""generated_text""": all_text} records.append(_a ) return records
132
"""simple docstring""" from pathlib import Path from typing import List from transformers import is_torch_available, is_vision_available from transformers.testing_utils import get_tests_dir, is_tool_test from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText if is_torch_available(): import torch if is_vision_available(): from PIL import Image a :Optional[int] = ["text", "image", "audio"] def _lowercase ( __lowerCAmelCase ) -> List[Any]: SCREAMING_SNAKE_CASE__ : List[Any] = [] for input_type in input_types: if input_type == "text": inputs.append("""Text input""" ) elif input_type == "image": inputs.append( Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" ).resize((512, 512) ) ) elif input_type == "audio": inputs.append(torch.ones(3000 ) ) elif isinstance(__lowerCAmelCase , __lowerCAmelCase ): inputs.append(create_inputs(__lowerCAmelCase ) ) else: raise ValueError(F'''Invalid type requested: {input_type}''' ) return inputs def _lowercase ( __lowerCAmelCase ) -> List[str]: SCREAMING_SNAKE_CASE__ : Tuple = [] for output in outputs: if isinstance(__lowerCAmelCase , (str, AgentText) ): output_types.append("""text""" ) elif isinstance(__lowerCAmelCase , (Image.Image, AgentImage) ): output_types.append("""image""" ) elif isinstance(__lowerCAmelCase , (torch.Tensor, AgentAudio) ): output_types.append("""audio""" ) else: raise ValueError(F'''Invalid output: {output}''' ) return output_types @is_tool_test class __a : '''simple docstring''' def _a ( self ) -> str: """simple docstring""" self.assertTrue(hasattr(self.tool , """inputs""" ) ) self.assertTrue(hasattr(self.tool , """outputs""" ) ) SCREAMING_SNAKE_CASE__ : List[Any] = self.tool.inputs for _input in inputs: if isinstance(_input , _a ): for __input in _input: self.assertTrue(__input in authorized_types ) else: self.assertTrue(_input in authorized_types ) SCREAMING_SNAKE_CASE__ : Dict = self.tool.outputs for _output in outputs: self.assertTrue(_output in authorized_types ) def _a ( self ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = create_inputs(self.tool.inputs ) SCREAMING_SNAKE_CASE__ : Optional[Any] = self.tool(*_a ) # There is a single output if len(self.tool.outputs ) == 1: SCREAMING_SNAKE_CASE__ : List[Any] = [outputs] self.assertListEqual(output_types(_a ) , self.tool.outputs ) def _a ( self ) -> List[Any]: """simple docstring""" self.assertTrue(hasattr(self.tool , """description""" ) ) self.assertTrue(hasattr(self.tool , """default_checkpoint""" ) ) self.assertTrue(self.tool.description.startswith("""This is a tool that""" ) ) def _a ( self ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = create_inputs(self.tool.inputs ) SCREAMING_SNAKE_CASE__ : Dict = self.tool(*_a ) if not isinstance(_a , _a ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = [outputs] self.assertEqual(len(_a ) , len(self.tool.outputs ) ) for output, output_type in zip(_a , self.tool.outputs ): SCREAMING_SNAKE_CASE__ : Optional[Any] = AGENT_TYPE_MAPPING[output_type] self.assertTrue(isinstance(_a , _a ) ) def _a ( self ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = create_inputs(self.tool.inputs ) SCREAMING_SNAKE_CASE__ : List[Any] = [] for _input, input_type in zip(_a , self.tool.inputs ): if isinstance(_a , _a ): _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] ) else: _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) ) # Should not raise an error 
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tool(*_a ) if not isinstance(_a , _a ): SCREAMING_SNAKE_CASE__ : Optional[Any] = [outputs] self.assertEqual(len(_a ) , len(self.tool.outputs ) )
132
1
'''simple docstring'''
import gc
import threading
import time

import psutil
import torch


class lowercase_ :
    """simple docstring"""

    def __init__( self : Union[str, Any] ):
        """simple docstring"""
        _SCREAMING_SNAKE_CASE = psutil.Process()
        _SCREAMING_SNAKE_CASE = False

    def lowerCAmelCase_ ( self : List[Any] ):
        """simple docstring"""
        _SCREAMING_SNAKE_CASE = -1

        while True:
            _SCREAMING_SNAKE_CASE = max(self.process.memory_info().rss , self.cpu_memory_peak )

            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def lowerCAmelCase_ ( self : List[str] ):
        """simple docstring"""
        _SCREAMING_SNAKE_CASE = True
        _SCREAMING_SNAKE_CASE = threading.Thread(target=self.peak_monitor )
        _SCREAMING_SNAKE_CASE = True
        self.thread.start()

    def lowerCAmelCase_ ( self : str ):
        """simple docstring"""
        _SCREAMING_SNAKE_CASE = False
        self.thread.join()
        return self.cpu_memory_peak


lowerCamelCase_ = PeakCPUMemory()


def SCREAMING_SNAKE_CASE_ ( ) -> List[str]:
    # Time
    _SCREAMING_SNAKE_CASE = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    _SCREAMING_SNAKE_CASE = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count() ):
        _SCREAMING_SNAKE_CASE = torch.cuda.memory_allocated(__A )
    torch.cuda.reset_peak_memory_stats()

    return measures


def SCREAMING_SNAKE_CASE_ ( __A : Union[str, Any] ) -> Optional[Any]:
    # Time
    _SCREAMING_SNAKE_CASE = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    _SCREAMING_SNAKE_CASE = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    _SCREAMING_SNAKE_CASE = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem
    for i in range(torch.cuda.device_count() ):
        _SCREAMING_SNAKE_CASE = (torch.cuda.memory_allocated(__A ) - start_measures[str(__A )]) / 2**20
        _SCREAMING_SNAKE_CASE = (torch.cuda.max_memory_allocated(__A ) - start_measures[str(__A )]) / 2**20

    return measures


def SCREAMING_SNAKE_CASE_ ( __A : List[str] , __A : Dict ) -> Any:
    print(f"""{description}:""" )
    print(f"""- Time: {measures["time"]:.2f}s""" )
    for i in range(torch.cuda.device_count() ):
        print(f"""- GPU {i} allocated: {measures[str(__A )]:.2f}MiB""" )
        _SCREAMING_SNAKE_CASE = measures[f"""{i}-peak"""]
        print(f"""- GPU {i} peak: {peak:.2f}MiB""" )
    print(f"""- CPU RAM allocated: {measures["cpu"]:.2f}MiB""" )
    print(f"""- CPU RAM peak: {measures["cpu-peak"]:.2f}MiB""" )
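A minimal usage sketch for the profiling helpers above, assuming the three obfuscated module-level functions correspond to start_measures, end_measures, and log_measures (hypothetical names; the dump collapses them all to SCREAMING_SNAKE_CASE_):

# Hypothetical usage sketch; the real function names are obfuscated in the snippet above.
start = start_measures()               # snapshot wall time, CPU RSS, per-GPU allocated bytes
run_workload()                         # hypothetical stand-in for the code being profiled
measures = end_measures(start)         # deltas in MiB plus CPU and per-GPU peaks
log_measures(measures, "my workload")  # prints the time/CPU/GPU report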
111
'''simple docstring'''
lowerCamelCase_ = 'Tobias Carryer'

from time import time


class lowercase_ :
    """simple docstring"""

    def __init__( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Dict=int(time() ) ):  # noqa: B008
        """simple docstring"""
        _SCREAMING_SNAKE_CASE = multiplier
        _SCREAMING_SNAKE_CASE = increment
        _SCREAMING_SNAKE_CASE = modulo
        _SCREAMING_SNAKE_CASE = seed

    def lowerCAmelCase_ ( self : List[str] ):
        """simple docstring"""
        _SCREAMING_SNAKE_CASE = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed


if __name__ == "__main__":
    # Show the LCG in action.
    lowerCamelCase_ = LinearCongruentialGenerator(1_66_45_25, 10_13_90_42_23, 2 << 31)
    while True:
        print(lcg.next_number())
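The class above is the classic linear congruential generator: with multiplier $a$, increment $c$, and modulus $m$ (the three values stored in __init__), each call advances the state by

$$x_{n+1} = (a\,x_n + c) \bmod m.$$

The demo block instantiates it with $a = 1664525$, $c = 1013904223$, and $m = 2^{32}$ (written `2 << 31`), the widely used Numerical Recipes parameters, seeding from the current time.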
111
1
'''simple docstring'''
__lowercase = [
    '''DownloadConfig''',
    '''DownloadManager''',
    '''DownloadMode''',
    '''StreamingDownloadManager''',
]

from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
272
'''simple docstring'''
from typing import Optional, Tuple

import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule


def snake_case__ ( _A: Union[str, Any] , _A: Tuple , _A: Any=1e-12 ) -> str:
    '''simple docstring'''
    lowerCAmelCase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(_A , axis=1 ) , a_min=_A ) ).T
    lowerCAmelCase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(_A , axis=1 ) , a_min=_A ) ).T
    return jnp.matmul(_A , norm_emb_a.T )


class a__( nn.Module ):
    '''simple docstring'''
    UpperCAmelCase_ : CLIPConfig
    UpperCAmelCase_ : jnp.dtype = jnp.floataa

    def a_ ( self):
        """simple docstring"""
        lowerCAmelCase = FlaxCLIPVisionModule(self.config.vision_config)
        lowerCAmelCase = nn.Dense(self.config.projection_dim , use_bias=__lowerCAmelCase , dtype=self.dtype)

        lowerCAmelCase = self.param("""concept_embeds""" , jax.nn.initializers.ones , (17, self.config.projection_dim))
        lowerCAmelCase = self.param(
            """special_care_embeds""" , jax.nn.initializers.ones , (3, self.config.projection_dim))

        lowerCAmelCase = self.param("""concept_embeds_weights""" , jax.nn.initializers.ones , (17,))
        lowerCAmelCase = self.param("""special_care_embeds_weights""" , jax.nn.initializers.ones , (3,))

    def __call__( self , __lowerCAmelCase):
        """simple docstring"""
        lowerCAmelCase = self.vision_model(__lowerCAmelCase)[1]
        lowerCAmelCase = self.visual_projection(__lowerCAmelCase)

        lowerCAmelCase = jax_cosine_distance(__lowerCAmelCase , self.special_care_embeds)
        lowerCAmelCase = jax_cosine_distance(__lowerCAmelCase , self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        lowerCAmelCase = 0.0

        lowerCAmelCase = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        lowerCAmelCase = jnp.round(__lowerCAmelCase , 3)
        lowerCAmelCase = jnp.any(special_scores > 0 , axis=1 , keepdims=__lowerCAmelCase)
        # Use a lower threshold if an image has any special care concept
        lowerCAmelCase = is_special_care * 0.01

        lowerCAmelCase = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        lowerCAmelCase = jnp.round(__lowerCAmelCase , 3)
        lowerCAmelCase = jnp.any(concept_scores > 0 , axis=1)

        return has_nsfw_concepts


class a__( lowerCAmelCase__ ):
    '''simple docstring'''
    UpperCAmelCase_ : int = CLIPConfig
    UpperCAmelCase_ : Any = '''clip_input'''
    UpperCAmelCase_ : List[str] = FlaxStableDiffusionSafetyCheckerModule

    def __init__( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = 0 , __lowerCAmelCase = jnp.floataa , __lowerCAmelCase = True , **__lowerCAmelCase , ):
        """simple docstring"""
        if input_shape is None:
            lowerCAmelCase = (1, 224, 224, 3)
        lowerCAmelCase = self.module_class(config=__lowerCAmelCase , dtype=__lowerCAmelCase , **__lowerCAmelCase)
        super().__init__(__lowerCAmelCase , __lowerCAmelCase , input_shape=__lowerCAmelCase , seed=__lowerCAmelCase , dtype=__lowerCAmelCase , _do_init=_do_init)

    def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None):
        """simple docstring"""
        lowerCAmelCase = jax.random.normal(__lowerCAmelCase , __lowerCAmelCase)

        lowerCAmelCase , lowerCAmelCase = jax.random.split(__lowerCAmelCase)
        lowerCAmelCase = {"""params""": params_rng, """dropout""": dropout_rng}

        lowerCAmelCase = self.module.init(__lowerCAmelCase , __lowerCAmelCase)["""params"""]

        return random_params

    def __call__( self , __lowerCAmelCase , __lowerCAmelCase = None , ):
        """simple docstring"""
        lowerCAmelCase = jnp.transpose(__lowerCAmelCase , (0, 2, 3, 1))

        return self.module.apply(
            {"""params""": params or self.params} ,
            jnp.array(__lowerCAmelCase , dtype=jnp.floataa) ,
            rngs={} ,
        )
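Despite its name, the jax_cosine_distance helper at the top of this snippet computes a cosine-similarity matrix: each row of both embedding matrices is divided by its L2 norm (clipped from below at $\varepsilon = 10^{-12}$) before the matrix product, i.e.

$$\mathrm{sim}_{ij} = \frac{a_i \cdot b_j}{\max(\lVert a_i \rVert, \varepsilon)\,\max(\lVert b_j \rVert, \varepsilon)},$$

so the per-concept thresholds in concept_embeds_weights and special_care_embeds_weights are compared against values in $[-1, 1]$.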
272
1
'''simple docstring''' import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all image processors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...image_processing_utils import ImageProcessingMixin from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) __A : Optional[int] = logging.get_logger(__name__) __A : List[str] = OrderedDict( [ ("align", "EfficientNetImageProcessor"), ("beit", "BeitImageProcessor"), ("bit", "BitImageProcessor"), ("blip", "BlipImageProcessor"), ("blip-2", "BlipImageProcessor"), ("bridgetower", "BridgeTowerImageProcessor"), ("chinese_clip", "ChineseCLIPImageProcessor"), ("clip", "CLIPImageProcessor"), ("clipseg", "ViTImageProcessor"), ("conditional_detr", "ConditionalDetrImageProcessor"), ("convnext", "ConvNextImageProcessor"), ("convnextv2", "ConvNextImageProcessor"), ("cvt", "ConvNextImageProcessor"), ("data2vec-vision", "BeitImageProcessor"), ("deformable_detr", "DeformableDetrImageProcessor"), ("deit", "DeiTImageProcessor"), ("deta", "DetaImageProcessor"), ("detr", "DetrImageProcessor"), ("dinat", "ViTImageProcessor"), ("donut-swin", "DonutImageProcessor"), ("dpt", "DPTImageProcessor"), ("efficientformer", "EfficientFormerImageProcessor"), ("efficientnet", "EfficientNetImageProcessor"), ("flava", "FlavaImageProcessor"), ("focalnet", "BitImageProcessor"), ("git", "CLIPImageProcessor"), ("glpn", "GLPNImageProcessor"), ("groupvit", "CLIPImageProcessor"), ("imagegpt", "ImageGPTImageProcessor"), ("instructblip", "BlipImageProcessor"), ("layoutlmv2", "LayoutLMv2ImageProcessor"), ("layoutlmv3", "LayoutLMv3ImageProcessor"), ("levit", "LevitImageProcessor"), ("mask2former", "Mask2FormerImageProcessor"), ("maskformer", "MaskFormerImageProcessor"), ("mgp-str", "ViTImageProcessor"), ("mobilenet_v1", "MobileNetV1ImageProcessor"), ("mobilenet_v2", "MobileNetV2ImageProcessor"), ("mobilevit", "MobileViTImageProcessor"), ("mobilevit", "MobileViTImageProcessor"), ("mobilevitv2", "MobileViTImageProcessor"), ("nat", "ViTImageProcessor"), ("oneformer", "OneFormerImageProcessor"), ("owlvit", "OwlViTImageProcessor"), ("perceiver", "PerceiverImageProcessor"), ("pix2struct", "Pix2StructImageProcessor"), ("poolformer", "PoolFormerImageProcessor"), ("regnet", "ConvNextImageProcessor"), ("resnet", "ConvNextImageProcessor"), ("sam", "SamImageProcessor"), ("segformer", "SegformerImageProcessor"), ("swiftformer", "ViTImageProcessor"), ("swin", "ViTImageProcessor"), ("swin2sr", "Swin2SRImageProcessor"), ("swinv2", "ViTImageProcessor"), ("table-transformer", "DetrImageProcessor"), ("timesformer", "VideoMAEImageProcessor"), ("tvlt", "TvltImageProcessor"), ("upernet", "SegformerImageProcessor"), ("van", "ConvNextImageProcessor"), ("videomae", "VideoMAEImageProcessor"), ("vilt", "ViltImageProcessor"), ("vit", "ViTImageProcessor"), ("vit_hybrid", "ViTHybridImageProcessor"), ("vit_mae", "ViTImageProcessor"), ("vit_msn", "ViTImageProcessor"), ("xclip", "CLIPImageProcessor"), ("yolos", "YolosImageProcessor"), ] ) __A : Union[str, Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES) def UpperCamelCase_ ( A__ : str ): '''simple docstring''' for module_name, extractors in 
IMAGE_PROCESSOR_MAPPING_NAMES.items(): if class_name in extractors: lowerCAmelCase_ : Optional[Any] = model_type_to_module_name(A__ ) lowerCAmelCase_ : str = importlib.import_module(f'.{module_name}' , """transformers.models""" ) try: return getattr(A__ , A__ ) except AttributeError: continue for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items(): if getattr(A__ , """__name__""" , A__ ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. lowerCAmelCase_ : List[str] = importlib.import_module("""transformers""" ) if hasattr(A__ , A__ ): return getattr(A__ , A__ ) return None def UpperCamelCase_ ( A__ : Union[str, os.PathLike] , A__ : Optional[Union[str, os.PathLike]] = None , A__ : bool = False , A__ : bool = False , A__ : Optional[Dict[str, str]] = None , A__ : Optional[Union[bool, str]] = None , A__ : Optional[str] = None , A__ : bool = False , **A__ : Tuple , ): '''simple docstring''' lowerCAmelCase_ : Optional[Any] = get_file_from_repo( A__ , A__ , cache_dir=A__ , force_download=A__ , resume_download=A__ , proxies=A__ , use_auth_token=A__ , revision=A__ , local_files_only=A__ , ) if resolved_config_file is None: logger.info( """Could not locate the image processor configuration file, will try to use the model config instead.""" ) return {} with open(A__ , encoding="""utf-8""" ) as reader: return json.load(A__ ) class __snake_case : """simple docstring""" def __init__( self : Union[str, Any] ) -> str: raise EnvironmentError( """AutoImageProcessor is designed to be instantiated """ """using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.""" ) @classmethod @replace_list_option_in_docstrings(lowerCamelCase ) def __lowercase ( cls : Tuple , lowerCamelCase : Dict , **lowerCamelCase : Optional[Any] ) -> Dict: lowerCAmelCase_ : List[str] = kwargs.pop("""config""" , lowerCamelCase ) lowerCAmelCase_ : Tuple = kwargs.pop("""trust_remote_code""" , lowerCamelCase ) lowerCAmelCase_ : List[str] = True lowerCAmelCase_, lowerCAmelCase_ : int = ImageProcessingMixin.get_image_processor_dict(lowerCamelCase , **lowerCamelCase ) lowerCAmelCase_ : List[Any] = config_dict.get("""image_processor_type""" , lowerCamelCase ) lowerCAmelCase_ : int = None if "AutoImageProcessor" in config_dict.get("""auto_map""" , {} ): lowerCAmelCase_ : str = config_dict["""auto_map"""]["""AutoImageProcessor"""] # If we still don't have the image processor class, check if we're loading from a previous feature extractor config # and if so, infer the image processor class from there. if image_processor_class is None and image_processor_auto_map is None: lowerCAmelCase_ : List[Any] = config_dict.pop("""feature_extractor_type""" , lowerCamelCase ) if feature_extractor_class is not None: logger.warning( """Could not find image processor class in the image processor config or the model config. 
Loading""" """ based on pattern matching with the model's feature extractor configuration.""" ) lowerCAmelCase_ : Optional[int] = feature_extractor_class.replace("""FeatureExtractor""" , """ImageProcessor""" ) if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ): lowerCAmelCase_ : Optional[Any] = config_dict["""auto_map"""]["""AutoFeatureExtractor"""] lowerCAmelCase_ : Dict = feature_extractor_auto_map.replace("""FeatureExtractor""" , """ImageProcessor""" ) logger.warning( """Could not find image processor auto map in the image processor config or the model config.""" """ Loading based on pattern matching with the model's feature extractor configuration.""" ) # If we don't find the image processor class in the image processor config, let's try the model config. if image_processor_class is None and image_processor_auto_map is None: if not isinstance(lowerCamelCase , lowerCamelCase ): lowerCAmelCase_ : str = AutoConfig.from_pretrained(lowerCamelCase , **lowerCamelCase ) # It could be in `config.image_processor_type`` lowerCAmelCase_ : Optional[Any] = getattr(lowerCamelCase , """image_processor_type""" , lowerCamelCase ) if hasattr(lowerCamelCase , """auto_map""" ) and "AutoImageProcessor" in config.auto_map: lowerCAmelCase_ : Dict = config.auto_map["""AutoImageProcessor"""] if image_processor_class is not None: lowerCAmelCase_ : Optional[Any] = image_processor_class_from_name(lowerCamelCase ) lowerCAmelCase_ : Optional[Any] = image_processor_auto_map is not None lowerCAmelCase_ : Any = image_processor_class is not None or type(lowerCamelCase ) in IMAGE_PROCESSOR_MAPPING lowerCAmelCase_ : Union[str, Any] = resolve_trust_remote_code( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) if has_remote_code and trust_remote_code: lowerCAmelCase_ : Dict = get_class_from_dynamic_module( lowerCamelCase , lowerCamelCase , **lowerCamelCase ) lowerCAmelCase_ : int = kwargs.pop("""code_revision""" , lowerCamelCase ) if os.path.isdir(lowerCamelCase ): image_processor_class.register_for_auto_class() return image_processor_class.from_dict(lowerCamelCase , **lowerCamelCase ) elif image_processor_class is not None: return image_processor_class.from_dict(lowerCamelCase , **lowerCamelCase ) # Last try: we use the IMAGE_PROCESSOR_MAPPING. elif type(lowerCamelCase ) in IMAGE_PROCESSOR_MAPPING: lowerCAmelCase_ : List[str] = IMAGE_PROCESSOR_MAPPING[type(lowerCamelCase )] return image_processor_class.from_dict(lowerCamelCase , **lowerCamelCase ) raise ValueError( F'Unrecognized image processor in {pretrained_model_name_or_path}. Should have a ' F'`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following ' F'`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}' ) @staticmethod def __lowercase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] ) -> List[Any]: IMAGE_PROCESSOR_MAPPING.register(lowerCamelCase , lowerCamelCase )
89
'''simple docstring''' import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() __A : List[str] = logging.get_logger(__name__) def UpperCamelCase_ ( A__ : Union[str, Any] , A__ : Tuple=False ): '''simple docstring''' lowerCAmelCase_ : Tuple = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') ) rename_keys.append((f'blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') ) rename_keys.append((f'blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') ) rename_keys.append((f'blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') ) rename_keys.append((f'blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') ) rename_keys.append((f'blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') ) rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') ) rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') ) rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') ) rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') ) # projection layer + position embeddings rename_keys.extend( [ ("""cls_token""", """vit.embeddings.cls_token"""), ("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""), ("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""), ("""pos_embed""", """vit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ("""pre_logits.fc.weight""", """pooler.dense.weight"""), ("""pre_logits.fc.bias""", """pooler.dense.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" lowerCAmelCase_ : List[str] = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def UpperCamelCase_ ( A__ : Any , A__ : Any , A__ : Tuple=False ): '''simple docstring''' for i in range(config.num_hidden_layers ): if base_model: lowerCAmelCase_ : Optional[Any] = """""" else: lowerCAmelCase_ : Optional[Any] = """vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCAmelCase_ : List[Any] = state_dict.pop(f'blocks.{i}.attn.qkv.weight' ) lowerCAmelCase_ : Union[str, Any] = state_dict.pop(f'blocks.{i}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict lowerCAmelCase_ : Dict = in_proj_weight[ : config.hidden_size, : ] lowerCAmelCase_ : List[Any] = in_proj_bias[: config.hidden_size] lowerCAmelCase_ : Union[str, Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCAmelCase_ 
: List[str] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCAmelCase_ : Any = in_proj_weight[ -config.hidden_size :, : ] lowerCAmelCase_ : Union[str, Any] = in_proj_bias[-config.hidden_size :] def UpperCamelCase_ ( A__ : str ): '''simple docstring''' lowerCAmelCase_ : Dict = ["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(A__ , A__ ) def UpperCamelCase_ ( A__ : List[Any] , A__ : Optional[Any] , A__ : Dict ): '''simple docstring''' lowerCAmelCase_ : Tuple = dct.pop(A__ ) lowerCAmelCase_ : Tuple = val def UpperCamelCase_ ( ): '''simple docstring''' lowerCAmelCase_ : str = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCAmelCase_ : Optional[int] = Image.open(requests.get(A__ , stream=A__ ).raw ) return im @torch.no_grad() def UpperCamelCase_ ( A__ : Union[str, Any] , A__ : List[Any] ): '''simple docstring''' lowerCAmelCase_ : Optional[Any] = ViTConfig() lowerCAmelCase_ : Any = False # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size if vit_name[-5:] == "in21k": lowerCAmelCase_ : int = True lowerCAmelCase_ : Tuple = int(vit_name[-12:-10] ) lowerCAmelCase_ : Optional[int] = int(vit_name[-9:-6] ) else: lowerCAmelCase_ : Optional[int] = 10_00 lowerCAmelCase_ : Tuple = """huggingface/label-files""" lowerCAmelCase_ : Any = """imagenet-1k-id2label.json""" lowerCAmelCase_ : Dict = json.load(open(hf_hub_download(A__ , A__ , repo_type="""dataset""" ) , """r""" ) ) lowerCAmelCase_ : Union[str, Any] = {int(A__ ): v for k, v in idalabel.items()} lowerCAmelCase_ : Union[str, Any] = idalabel lowerCAmelCase_ : Union[str, Any] = {v: k for k, v in idalabel.items()} lowerCAmelCase_ : Optional[int] = int(vit_name[-6:-4] ) lowerCAmelCase_ : Dict = int(vit_name[-3:] ) # size of the architecture if "deit" in vit_name: if vit_name[9:].startswith("""tiny""" ): lowerCAmelCase_ : int = 1_92 lowerCAmelCase_ : List[str] = 7_68 lowerCAmelCase_ : List[str] = 12 lowerCAmelCase_ : int = 3 elif vit_name[9:].startswith("""small""" ): lowerCAmelCase_ : Optional[Any] = 3_84 lowerCAmelCase_ : Optional[int] = 15_36 lowerCAmelCase_ : Dict = 12 lowerCAmelCase_ : str = 6 else: pass else: if vit_name[4:].startswith("""small""" ): lowerCAmelCase_ : Tuple = 7_68 lowerCAmelCase_ : Any = 23_04 lowerCAmelCase_ : List[str] = 8 lowerCAmelCase_ : List[str] = 8 elif vit_name[4:].startswith("""base""" ): pass elif vit_name[4:].startswith("""large""" ): lowerCAmelCase_ : Dict = 10_24 lowerCAmelCase_ : List[Any] = 40_96 lowerCAmelCase_ : Any = 24 lowerCAmelCase_ : List[str] = 16 elif vit_name[4:].startswith("""huge""" ): lowerCAmelCase_ : Optional[int] = 12_80 lowerCAmelCase_ : Dict = 51_20 lowerCAmelCase_ : Union[str, Any] = 32 lowerCAmelCase_ : Optional[int] = 16 # load original model from timm lowerCAmelCase_ : Union[str, Any] = timm.create_model(A__ , pretrained=A__ ) timm_model.eval() # load state_dict of original model, remove and rename some keys lowerCAmelCase_ : int = timm_model.state_dict() if base_model: remove_classification_head_(A__ ) lowerCAmelCase_ : str = create_rename_keys(A__ , A__ ) for src, dest in rename_keys: rename_key(A__ , A__ , A__ ) read_in_q_k_v(A__ , A__ , A__ ) # load HuggingFace model if vit_name[-5:] == "in21k": lowerCAmelCase_ : int = ViTModel(A__ ).eval() else: lowerCAmelCase_ : Optional[int] = ViTForImageClassification(A__ ).eval() model.load_state_dict(A__ ) # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor if "deit" in vit_name: lowerCAmelCase_ : Any = 
DeiTImageProcessor(size=config.image_size ) else: lowerCAmelCase_ : Any = ViTImageProcessor(size=config.image_size ) lowerCAmelCase_ : Tuple = image_processor(images=prepare_img() , return_tensors="""pt""" ) lowerCAmelCase_ : int = encoding["""pixel_values"""] lowerCAmelCase_ : int = model(A__ ) if base_model: lowerCAmelCase_ : Union[str, Any] = timm_model.forward_features(A__ ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(A__ , outputs.pooler_output , atol=1E-3 ) else: lowerCAmelCase_ : Union[str, Any] = timm_model(A__ ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(A__ , outputs.logits , atol=1E-3 ) Path(A__ ).mkdir(exist_ok=A__ ) print(f'Saving model {vit_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(A__ ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(A__ ) if __name__ == "__main__": __A : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--vit_name", default="vit_base_patch16_224", type=str, help="Name of the ViT timm model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) __A : Union[str, Any] = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
89
1
"""simple docstring""" import json from typing import Iterator, List, Union from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers from tokenizers.implementations.base_tokenizer import BaseTokenizer from tokenizers.models import Unigram from tokenizers.processors import TemplateProcessing class snake_case__ ( snake_case_ ): def __init__( self , lowerCamelCase = "▁" , lowerCamelCase = True , lowerCamelCase = "<unk>" , lowerCamelCase = "</s>" , lowerCamelCase = "<pad>" , ): __a = { "pad": {"id": 0, "token": pad_token}, "eos": {"id": 1, "token": eos_token}, "unk": {"id": 2, "token": unk_token}, } __a = [None] * len(self.special_tokens ) for token_dict in self.special_tokens.values(): __a = token_dict["token"] __a = Tokenizer(Unigram() ) __a = normalizers.Sequence( [ normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(" {2,}" ) , " " ), normalizers.Lowercase(), ] ) __a = pre_tokenizers.Sequence( [ pre_tokenizers.Metaspace(replacement=lowerCamelCase , add_prefix_space=lowerCamelCase ), pre_tokenizers.Digits(individual_digits=lowerCamelCase ), pre_tokenizers.Punctuation(), ] ) __a = decoders.Metaspace(replacement=lowerCamelCase , add_prefix_space=lowerCamelCase ) __a = TemplateProcessing( single=F"$A {self.special_tokens['eos']['token']}" , special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])] , ) __a = { "model": "SentencePieceUnigram", "replacement": replacement, "add_prefix_space": add_prefix_space, } super().__init__(lowerCamelCase , lowerCamelCase ) def a__ ( self , lowerCamelCase , lowerCamelCase = 8000 , lowerCamelCase = True , ): __a = trainers.UnigramTrainer( vocab_size=lowerCamelCase , special_tokens=self.special_tokens_list , show_progress=lowerCamelCase , ) if isinstance(lowerCamelCase , lowerCamelCase ): __a = [files] self._tokenizer.train(lowerCamelCase , trainer=lowerCamelCase ) self.add_unk_id() def a__ ( self , lowerCamelCase , lowerCamelCase = 8000 , lowerCamelCase = True , ): __a = trainers.UnigramTrainer( vocab_size=lowerCamelCase , special_tokens=self.special_tokens_list , show_progress=lowerCamelCase , ) self._tokenizer.train_from_iterator(lowerCamelCase , trainer=lowerCamelCase ) self.add_unk_id() def a__ ( self ): __a = json.loads(self._tokenizer.to_str() ) __a = self.special_tokens["unk"]["id"] __a = Tokenizer.from_str(json.dumps(lowerCamelCase ) )
261
"""simple docstring""" import argparse import fairseq import torch from torch import nn from transformers import ( MBartaaTokenizer, MBartConfig, MBartForCausalLM, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() SCREAMING_SNAKE_CASE__:Any = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__:Any = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } SCREAMING_SNAKE_CASE__:Optional[int] = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", ] def _lowerCamelCase( a , a , a , a , a ): for attribute in key.split("." ): __a = getattr(a , a ) if weight_type is not None: __a = getattr(a , a ).shape else: __a = hf_pointer.shape assert hf_shape == value.shape, ( F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" F" {value.shape} for {full_name}" ) if weight_type == "weight": __a = value elif weight_type == "weight_g": __a = value elif weight_type == "weight_v": __a = value elif weight_type == "bias": __a = value else: __a = value logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." ) def _lowerCamelCase( a , a ): __a = [] __a = fairseq_model.state_dict() __a = hf_model.feature_extractor __a = hf_model.adapter for name, value in fairseq_dict.items(): __a = False if "conv_layers" in name: load_conv_layer( a , a , a , a , hf_model.config.feat_extract_norm == "group" , ) __a = True elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."] ): load_adapter(a , a , a , a ) __a = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: __a = True if "*" in mapped_key: __a = name.split(a )[0].split("." )[-2] __a = mapped_key.replace("*" , a ) if "weight_g" in name: __a = "weight_g" elif "weight_v" in name: __a = "weight_v" elif "bias" in name: __a = "bias" elif "weight" in name: __a = "weight" else: __a = None set_recursively(a , a , a , a , a ) continue if not is_used: unused_weights.append(a ) logger.warning(F"Unused weights: {unused_weights}" ) def _lowerCamelCase( a , a , a , a , a ): __a = full_name.split("conv_layers." )[-1] __a = name.split("." 
) __a = int(items[0] ) __a = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) __a = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) __a = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was" " found." ) __a = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." ) __a = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) else: unused_weights.append(a ) def _lowerCamelCase( a , a , a , a ): __a = full_name.split("adaptor." )[-1] __a = name.split("." ) if items[1].isdigit(): __a = int(items[1] ) else: __a = None if "adaptor" not in full_name: if "proj_ln" in full_name: # has to be layer norm if "bias" in name: assert ( value.shape == adapter.proj_layer_norm.bias.data.shape ), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found." __a = value logger.info(F"Adapter proj layer norm bias was initialized from {full_name}." ) if "weight" in name: assert ( value.shape == adapter.proj_layer_norm.weight.data.shape ), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found." __a = value else: # has to be projection layer if "bias" in name: assert ( value.shape == adapter.proj.bias.data.shape ), F"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found." __a = value logger.info(F"Adapter proj layer bias was initialized from {full_name}." ) if "weight" in name: assert ( value.shape == adapter.proj.weight.data.shape ), F"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found." __a = value logger.info(F"Adapter proj layer weight was initialized from {full_name}." ) elif isinstance(a , a ): if "bias" in name: assert ( value.shape == adapter.layers[layer_id].conv.bias.data.shape ), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found." __a = value logger.info(F"Adapter layer {layer_id} bias was initialized from {full_name}." ) elif "weight" in name: assert ( value.shape == adapter.layers[layer_id].conv.weight.data.shape ), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found." __a = value logger.info(F"Adapter layer {layer_id} bias was initialized from {full_name}." 
) else: unused_weights.append(a ) def _lowerCamelCase( a ): __a , __a = emb.weight.shape __a = nn.Linear(a , a , bias=a ) __a = emb.weight.data return lin_layer @torch.no_grad() def _lowerCamelCase( a , a , a , a , a , a , a , a , a , a , a , ): __a = WavaVecaConfig.from_pretrained( a , add_adapter=a , adapter_stride=a , adapter_kernel_size=a , use_auth_token=a , output_hidden_size=a , ) __a = MBartConfig.from_pretrained(a ) # load model __a , __a , __a = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={ "config_yaml": config_yaml_path, "data": "/".join(dict_path.split("/" )[:-1] ), "w2v_path": checkpoint_path, "load_pretrained_decoder_from": None, } , ) __a = model[0].eval() # load feature extractor __a = WavaVecaFeatureExtractor.from_pretrained(a , use_auth_token=a ) # set weights for wav2vec2 encoder __a = WavaVecaModel(a ) recursively_load_weights_wavaveca(model.encoder , a ) # load decoder weights __a = MBartForCausalLM(a ) __a , __a = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=a ) logger.warning(F"The following keys are missing when loading the decoder weights: {missing_keys}" ) logger.warning(F"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" ) __a = SpeechEncoderDecoderModel(encoder=a , decoder=a ) __a = False __a = MBartaaTokenizer(a ) tokenizer.save_pretrained(a ) __a = hf_wavavec.config.to_dict() __a = tokenizer.pad_token_id __a = tokenizer.bos_token_id __a = tokenizer.eos_token_id __a = "mbart50" __a = "wav2vec2" __a = tokenizer.eos_token_id __a = 2_5_0_0_0_4 __a = tokenizer.eos_token_id __a = SpeechEncoderDecoderConfig.from_dict(a ) hf_wavavec.save_pretrained(a ) feature_extractor.save_pretrained(a ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__:int = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""") parser.add_argument( """--encoder_config_path""", default="""facebook/wav2vec2-xls-r-1b""", type=str, help="""Path to hf encoder wav2vec2 checkpoint config""", ) parser.add_argument( """--decoder_config_path""", default="""facebook/mbart-large-50-one-to-many-mmt""", type=str, help="""Path to hf decoder checkpoint config""", ) parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whethere to add model adapter layers""") parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""") parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""") parser.add_argument("""--encoder_output_dim""", default=1024, type=int, help="""encoder output dim""") parser.add_argument("""--start_token_id""", default=250004, type=int, help="""`decoder_start_token_id` of model config""") SCREAMING_SNAKE_CASE__:List[Any] = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, args.config_yaml_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, add_adapter=args.add_adapter, adapter_kernel_size=args.adapter_kernel_size, 
adapter_stride=args.adapter_stride, decoder_start_token_id=args.start_token_id, encoder_output_dim=args.encoder_output_dim, )
261
1
"""simple docstring""" # DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from typing import Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import randn_tensor from .scheduling_utils import SchedulerMixin class __lowerCamelCase ( A__ , A__ ): '''simple docstring''' a_ : Union[str, Any] = 1 @register_to_config def __init__( self : Optional[Any] , a_ : str=20_00 , a_ : Union[str, Any]=0.1 , a_ : int=20 , a_ : Optional[Any]=1e-3 ): lowerCAmelCase_ : Dict = None lowerCAmelCase_ : Optional[Any] = None lowerCAmelCase_ : Optional[int] = None def lowerCamelCase ( self : str , a_ : int , a_ : Union[str, torch.device] = None ): lowerCAmelCase_ : int = torch.linspace(1 , self.config.sampling_eps , a_ , device=a_ ) def lowerCamelCase ( self : int , a_ : Optional[Any] , a_ : Optional[Any] , a_ : Dict , a_ : Any=None ): if self.timesteps is None: raise ValueError( "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" ) # TODO(Patrick) better comments + non-PyTorch # postprocess model score lowerCAmelCase_ : List[Any] = ( -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min ) lowerCAmelCase_ : int = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) ) lowerCAmelCase_ : Optional[int] = std.flatten() while len(std.shape ) < len(score.shape ): lowerCAmelCase_ : Optional[int] = std.unsqueeze(-1 ) lowerCAmelCase_ : Optional[Any] = -score / std # compute lowerCAmelCase_ : Optional[int] = -1.0 / len(self.timesteps ) lowerCAmelCase_ : Any = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min) lowerCAmelCase_ : Optional[int] = beta_t.flatten() while len(beta_t.shape ) < len(x.shape ): lowerCAmelCase_ : List[str] = beta_t.unsqueeze(-1 ) lowerCAmelCase_ : Optional[int] = -0.5 * beta_t * x lowerCAmelCase_ : List[Any] = torch.sqrt(a_ ) lowerCAmelCase_ : str = drift - diffusion**2 * score lowerCAmelCase_ : Tuple = x + drift * dt # add noise lowerCAmelCase_ : List[str] = randn_tensor(x.shape , layout=x.layout , generator=a_ , device=x.device , dtype=x.dtype ) lowerCAmelCase_ : Any = x_mean + diffusion * math.sqrt(-dt ) * noise return x, x_mean def __len__( self : Any ): return self.config.num_train_timesteps
161
"""simple docstring""" import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __lowerCamelCase ( A__ , unittest.TestCase ): '''simple docstring''' a_ : str = """ssube/stable-diffusion-x4-upscaler-onnx""" def lowerCamelCase ( self : Any , a_ : Dict=0 ): lowerCAmelCase_ : Dict = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(a_ ) ) lowerCAmelCase_ : Tuple = torch.manual_seed(a_ ) lowerCAmelCase_ : List[str] = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def lowerCamelCase ( self : Union[str, Any] ): lowerCAmelCase_ : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) pipe.set_progress_bar_config(disable=a_ ) lowerCAmelCase_ : Tuple = self.get_dummy_inputs() lowerCAmelCase_ : List[Any] = pipe(**a_ ).images lowerCAmelCase_ : str = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 5_12, 5_12, 3) lowerCAmelCase_ : int = np.array( [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def lowerCamelCase ( self : Optional[Any] ): lowerCAmelCase_ : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) lowerCAmelCase_ : int = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=a_ ) pipe.set_progress_bar_config(disable=a_ ) lowerCAmelCase_ : Optional[Any] = self.get_dummy_inputs() lowerCAmelCase_ : Union[str, Any] = pipe(**a_ ).images lowerCAmelCase_ : str = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) lowerCAmelCase_ : str = np.array( [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowerCamelCase ( self : Union[str, Any] ): lowerCAmelCase_ : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) lowerCAmelCase_ : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=a_ ) lowerCAmelCase_ : Optional[int] = self.get_dummy_inputs() lowerCAmelCase_ : Tuple = pipe(**a_ ).images lowerCAmelCase_ : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) lowerCAmelCase_ : str = np.array( [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowerCamelCase ( self : Dict ): lowerCAmelCase_ : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) lowerCAmelCase_ : str = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=a_ ) lowerCAmelCase_ : Tuple = self.get_dummy_inputs() lowerCAmelCase_ : List[Any] = pipe(**a_ 
).images lowerCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) lowerCAmelCase_ : Optional[int] = np.array( [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowerCamelCase ( self : Tuple ): lowerCAmelCase_ : List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) lowerCAmelCase_ : int = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=a_ ) lowerCAmelCase_ : Tuple = self.get_dummy_inputs() lowerCAmelCase_ : List[str] = pipe(**a_ ).images lowerCAmelCase_ : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) lowerCAmelCase_ : Optional[Any] = np.array( [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @property def lowerCamelCase ( self : Optional[Any] ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def lowerCamelCase ( self : List[Any] ): lowerCAmelCase_ : Tuple = ort.SessionOptions() lowerCAmelCase_ : List[str] = False return options def lowerCamelCase ( self : Any ): lowerCAmelCase_ : List[str] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) lowerCAmelCase_ : List[Any] = init_image.resize((1_28, 1_28) ) # using the PNDM scheduler by default lowerCAmelCase_ : List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx" , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=a_ ) lowerCAmelCase_ : Dict = "A fantasy landscape, trending on artstation" lowerCAmelCase_ : Any = torch.manual_seed(0 ) lowerCAmelCase_ : Dict = pipe( prompt=a_ , image=a_ , guidance_scale=7.5 , num_inference_steps=10 , generator=a_ , output_type="np" , ) lowerCAmelCase_ : Dict = output.images lowerCAmelCase_ : Optional[int] = images[0, 2_55:2_58, 3_83:3_86, -1] assert images.shape == (1, 5_12, 5_12, 3) lowerCAmelCase_ : Optional[int] = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def lowerCamelCase ( self : str ): lowerCAmelCase_ : Tuple = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) lowerCAmelCase_ : Union[str, Any] = init_image.resize((1_28, 1_28) ) lowerCAmelCase_ : List[Any] = LMSDiscreteScheduler.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx" , subfolder="scheduler" ) lowerCAmelCase_ : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx" , scheduler=a_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=a_ ) lowerCAmelCase_ : Any = "A fantasy landscape, trending on artstation" lowerCAmelCase_ : Optional[int] = torch.manual_seed(0 ) lowerCAmelCase_ : List[str] = pipe( prompt=a_ , image=a_ , guidance_scale=7.5 , 
num_inference_steps=20 , generator=a_ , output_type="np" , ) lowerCAmelCase_ : List[str] = output.images lowerCAmelCase_ : Any = images[0, 2_55:2_58, 3_83:3_86, -1] assert images.shape == (1, 5_12, 5_12, 3) lowerCAmelCase_ : List[str] = np.array( [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
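# Added illustration (hedged): every test above follows the same numeric
# pattern -- take a 3x3 corner patch of the output image and compare it to a
# hard-coded reference slice under a loose tolerance. A self-contained numpy
# sketch of that check (the array values here are synthetic, not from the tests):
import numpy as np

image = np.random.RandomState(0).rand(1, 512, 512, 3).astype(np.float32)

image_slice = image[0, -3:, -3:, -1].flatten()  # bottom-right 3x3 of the last channel
expected_slice = image_slice.copy()             # the real tests hard-code these values

assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice - expected_slice).max() < 1e-1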
161
1
"""simple docstring""" import json import os import unittest from transformers.models.blenderbot_small.tokenization_blenderbot_small import ( VOCAB_FILES_NAMES, BlenderbotSmallTokenizer, ) from ...test_tokenization_common import TokenizerTesterMixin class A_ (lowercase__ ,unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = BlenderbotSmallTokenizer SCREAMING_SNAKE_CASE__ : List[Any] = False def UpperCamelCase__ ( self ): """simple docstring""" super().setUp() UpperCAmelCase_ : List[str] = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"] UpperCAmelCase_ : Any = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) ) UpperCAmelCase_ : Dict = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""] UpperCAmelCase_ : str = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"} UpperCAmelCase_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) UpperCAmelCase_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(lowercase_ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(lowercase_ ) ) def UpperCamelCase__ ( self , **lowercase_ ): """simple docstring""" kwargs.update(self.special_tokens_map ) return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **lowercase_ ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : List[Any] = "adapt act apte" UpperCAmelCase_ : str = "adapt act apte" return input_text, output_text def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Tuple = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) UpperCAmelCase_ : Any = "adapt act apte" UpperCAmelCase_ : Union[str, Any] = ["adapt", "act", "ap@@", "te"] UpperCAmelCase_ : str = tokenizer.tokenize(lowercase_ ) self.assertListEqual(lowercase_ , lowercase_ ) UpperCAmelCase_ : List[Any] = [tokenizer.bos_token] + tokens + [tokenizer.eos_token] UpperCAmelCase_ : Optional[Any] = [0, 1, 2, 3, 4, 5] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ) , lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" ) assert tok("sam" ).input_ids == [1384] UpperCAmelCase_ : List[Any] = "I am a small frog." UpperCAmelCase_ : List[Any] = tok([src_text] , padding=lowercase_ , truncation=lowercase_ )["input_ids"] UpperCAmelCase_ : Dict = tok.batch_decode(lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ )[0] assert src_text != decoded # I wish it did! assert decoded == "i am a small frog ." def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Tuple = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" ) UpperCAmelCase_ : List[str] = "I am a small frog ." UpperCAmelCase_ : Any = "." UpperCAmelCase_ : Union[str, Any] = tok(lowercase_ )["input_ids"] UpperCAmelCase_ : List[Any] = tok(lowercase_ )["input_ids"] assert encoded[-1] == encoded_dot[0]
61
'''simple docstring''' import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def __lowerCAmelCase (__lowerCAmelCase ): _UpperCAmelCase : Dict = filter(lambda __lowerCAmelCase : p.requires_grad , model.parameters() ) _UpperCAmelCase : Optional[Any] = sum([np.prod(p.size() ) for p in model_parameters] ) return params lowerCamelCase__ = logging.getLogger(__name__) def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ): if metric == "rouge2": _UpperCAmelCase : List[str] = "{val_avg_rouge2:.4f}-{step_count}" elif metric == "bleu": _UpperCAmelCase : int = "{val_avg_bleu:.4f}-{step_count}" elif metric == "em": _UpperCAmelCase : str = "{val_avg_em:.4f}-{step_count}" elif metric == "loss": _UpperCAmelCase : List[Any] = "{val_avg_loss:.4f}-{step_count}" else: raise NotImplementedError( F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this""" " function." ) _UpperCAmelCase : Any = ModelCheckpoint( dirpath=__lowerCAmelCase , filename=__lowerCAmelCase , monitor=F"""val_{metric}""" , mode="max" , save_top_k=1 , every_n_epochs=1 , ) return checkpoint_callback def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ): return EarlyStopping( monitor=F"""val_{metric}""" , mode="min" if "loss" in metric else "max" , patience=__lowerCAmelCase , verbose=__lowerCAmelCase , ) class lowerCAmelCase__ ( pl.Callback ): def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[int] ) ->int: '''simple docstring''' _UpperCAmelCase : Dict = {F"""lr_group_{i}""": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(lowerCamelCase__ ) @rank_zero_only def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : pl.Trainer , lowerCamelCase__ : pl.LightningModule , lowerCamelCase__ : str , lowerCamelCase__ : Union[str, Any]=True ) ->None: '''simple docstring''' logger.info(F"""***** {type_path} results at step {trainer.global_step:05d} *****""" ) _UpperCAmelCase : str = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} ) # Log results _UpperCAmelCase : Any = Path(pl_module.hparams.output_dir ) if type_path == "test": _UpperCAmelCase : List[Any] = od / "test_results.txt" _UpperCAmelCase : Union[str, Any] = od / "test_generations.txt" else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
_UpperCAmelCase : Optional[Any] = od / F"""{type_path}_results/{trainer.global_step:05d}.txt""" _UpperCAmelCase : Dict = od / F"""{type_path}_generations/{trainer.global_step:05d}.txt""" results_file.parent.mkdir(exist_ok=lowerCamelCase__ ) generations_file.parent.mkdir(exist_ok=lowerCamelCase__ ) with open(lowerCamelCase__ , "a+" ) as writer: for key in sorted(lowerCamelCase__ ): if key in ["log", "progress_bar", "preds"]: continue _UpperCAmelCase : Dict = metrics[key] if isinstance(lowerCamelCase__ , torch.Tensor ): _UpperCAmelCase : Tuple = val.item() _UpperCAmelCase : str = F"""{key}: {val:.6f}\n""" writer.write(lowerCamelCase__ ) if not save_generations: return if "preds" in metrics: _UpperCAmelCase : Optional[int] = "\n".join(metrics["preds"] ) generations_file.open("w+" ).write(lowerCamelCase__ ) @rank_zero_only def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[int] ) ->Any: '''simple docstring''' try: _UpperCAmelCase : Tuple = pl_module.model.model.num_parameters() except AttributeError: _UpperCAmelCase : int = pl_module.model.num_parameters() _UpperCAmelCase : int = count_trainable_parameters(lowerCamelCase__ ) # mp stands for million parameters trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1E6, "grad_mp": n_trainable_pars / 1E6} ) @rank_zero_only def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : pl.Trainer , lowerCamelCase__ : pl.LightningModule ) ->int: '''simple docstring''' save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(lowerCamelCase__ , lowerCamelCase__ , "test" ) @rank_zero_only def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : pl.Trainer , lowerCamelCase__ : Union[str, Any] ) ->Union[str, Any]: '''simple docstring''' save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
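# Added sketch (hedged): what the two callback factories above construct for
# metric="loss". The kwargs mirror the code; note the checkpoint helper
# hard-codes mode="max" even when monitoring a loss, while the early-stopping
# helper switches to "min" for loss metrics. Dirpath and patience are illustrative.
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint

checkpoint_cb = ModelCheckpoint(
    dirpath="outputs",
    filename="{val_avg_loss:.4f}-{step_count}",
    monitor="val_loss",
    mode="max",  # as in the helper above; "min" would be the usual choice for a loss
    save_top_k=1,
    every_n_epochs=1,
)
early_stopping_cb = EarlyStopping(monitor="val_loss", mode="min", patience=3, verbose=True)
# Both can then be passed to pl.Trainer(callbacks=[...]) together with the
# logging callback defined above.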
234
0
def solution(limit: int = 1_000_000) -> int:
    """Sum Euler's totient phi(n) for 2 <= n <= limit, via a prime sieve."""
    # Sieve of Eratosthenes over the odd numbers, plus 2.
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    # Apply the product formula phi(n) = n * prod(1 - 1/p) over primes p | n.
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
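# Added cross-check: for limit = 10 the totient sum is
# phi(2..10) = 1+2+2+4+2+6+4+6+4 = 31. An integer-arithmetic variant of the
# same sieve (no floats) confirms the value:
def phi_sum(limit: int) -> int:
    phi = list(range(limit + 1))  # phi[n] starts at n
    for p in range(2, limit + 1):
        if phi[p] == p:  # p was never reduced, so it is prime
            for n in range(p, limit + 1, p):
                phi[n] -= phi[n] // p  # multiply by (1 - 1/p) exactly
    return sum(phi[2:])


assert phi_sum(10) == 31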
351
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)


_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
47
0
"""simple docstring""" import datasets from .evaluate import evaluate SCREAMING_SNAKE_CASE_ : Any = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n' SCREAMING_SNAKE_CASE_ : Tuple = '\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n' SCREAMING_SNAKE_CASE_ : List[str] = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the SQuAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> squad_metric = datasets.load_metric("squad")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION ) class a ( datasets.Metric ): """simple docstring""" def UpperCamelCase ( self: str ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": {"""id""": datasets.Value("""string""" ), """prediction_text""": datasets.Value("""string""" )}, """references""": { """id""": datasets.Value("""string""" ), """answers""": datasets.features.Sequence( { """text""": datasets.Value("""string""" ), """answer_start""": datasets.Value("""int32""" ), } ), }, } ) , codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , ) def UpperCamelCase ( self: Any , UpperCamelCase: Union[str, Any] , UpperCamelCase: Union[str, Any] ): """simple docstring""" A__ = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions} A__ = [ { """paragraphs""": [ { """qas""": [ { """answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]], """id""": ref["""id"""], } for ref in references ] } ] } ] A__ = evaluate(dataset=UpperCamelCase , predictions=UpperCamelCase ) return score
335
"""simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = tuple[float, float, float] SCREAMING_SNAKE_CASE_ : Optional[int] = tuple[float, float, float] def _snake_case ( UpperCAmelCase_ : Pointad , UpperCAmelCase_ : Pointad ): A__ = end_pointa[0] - end_pointa[0] A__ = end_pointa[1] - end_pointa[1] A__ = end_pointa[2] - end_pointa[2] return (x, y, z) def _snake_case ( UpperCAmelCase_ : Vectorad , UpperCAmelCase_ : Vectorad ): A__ = ab[1] * ac[2] - ab[2] * ac[1] # *i A__ = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j A__ = ab[0] * ac[1] - ab[1] * ac[0] # *k return (x, y, z) def _snake_case ( UpperCAmelCase_ : Vectorad , UpperCAmelCase_ : int ): return tuple(round(UpperCAmelCase_ , UpperCAmelCase_ ) for x in vector ) == (0, 0, 0) def _snake_case ( UpperCAmelCase_ : Pointad , UpperCAmelCase_ : Pointad , UpperCAmelCase_ : Pointad , UpperCAmelCase_ : int = 10 ): A__ = create_vector(UpperCAmelCase_ , UpperCAmelCase_ ) A__ = create_vector(UpperCAmelCase_ , UpperCAmelCase_ ) return is_zero_vector(get_ad_vectors_cross(UpperCAmelCase_ , UpperCAmelCase_ ) , UpperCAmelCase_ )
335
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_table_transformer": [
        "TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TableTransformerConfig",
        "TableTransformerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_table_transformer"] = [
        "TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TableTransformerForObjectDetection",
        "TableTransformerModel",
        "TableTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_table_transformer import (
        TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TableTransformerConfig,
        TableTransformerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_table_transformer import (
            TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TableTransformerForObjectDetection,
            TableTransformerModel,
            TableTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
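# Added illustration (hedged): a minimal mock of the lazy-module idea used
# above -- attribute access triggers the real submodule import. This is a
# simplified stand-in for demonstration, not the transformers `_LazyModule`
# implementation, and it only works when instantiated inside a real package:
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported attribute back to the submodule that defines it.
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        # Import the submodule only when one of its attributes is first used.
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)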
26
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_distilbert import DistilBertTokenizer UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} UpperCAmelCase__ = { "vocab_file": { "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt", "distilbert-base-uncased-distilled-squad": ( "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt" ), "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt", "distilbert-base-cased-distilled-squad": ( "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt" ), "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt", "distilbert-base-multilingual-cased": ( "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt" ), }, "tokenizer_file": { "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json", "distilbert-base-uncased-distilled-squad": ( "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json" ), "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json", "distilbert-base-cased-distilled-squad": ( "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json" ), "distilbert-base-german-cased": ( "https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json" ), "distilbert-base-multilingual-cased": ( "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json" ), }, } UpperCAmelCase__ = { "distilbert-base-uncased": 512, "distilbert-base-uncased-distilled-squad": 512, "distilbert-base-cased": 512, "distilbert-base-cased-distilled-squad": 512, "distilbert-base-german-cased": 512, "distilbert-base-multilingual-cased": 512, } UpperCAmelCase__ = { "distilbert-base-uncased": {"do_lower_case": True}, "distilbert-base-uncased-distilled-squad": {"do_lower_case": True}, "distilbert-base-cased": {"do_lower_case": False}, "distilbert-base-cased-distilled-squad": {"do_lower_case": False}, "distilbert-base-german-cased": {"do_lower_case": False}, "distilbert-base-multilingual-cased": {"do_lower_case": False}, } class lowercase_ ( lowercase ): '''simple docstring''' __snake_case = VOCAB_FILES_NAMES __snake_case = PRETRAINED_VOCAB_FILES_MAP __snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __snake_case = PRETRAINED_INIT_CONFIGURATION __snake_case = ['''input_ids''', '''attention_mask'''] __snake_case = DistilBertTokenizer def __init__( self : Dict , __UpperCAmelCase : List[Any]=None , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : Optional[int]=True , __UpperCAmelCase : Optional[int]="[UNK]" , __UpperCAmelCase : str="[SEP]" , __UpperCAmelCase : Tuple="[PAD]" , __UpperCAmelCase : Any="[CLS]" , __UpperCAmelCase : int="[MASK]" , __UpperCAmelCase : Optional[int]=True , __UpperCAmelCase : Optional[Any]=None , **__UpperCAmelCase : str , ) ->Optional[int]: """simple docstring""" super().__init__( __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , 
tokenize_chinese_chars=__UpperCAmelCase , strip_accents=__UpperCAmelCase , **__UpperCAmelCase , ) a = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , __UpperCAmelCase ) != do_lower_case or normalizer_state.get('''strip_accents''' , __UpperCAmelCase ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , __UpperCAmelCase ) != tokenize_chinese_chars ): a = getattr(__UpperCAmelCase , normalizer_state.pop('''type''' ) ) a = do_lower_case a = strip_accents a = tokenize_chinese_chars a = normalizer_class(**__UpperCAmelCase ) a = do_lower_case def __lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[int]=None ) ->Optional[Any]: """simple docstring""" a = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) ->List[int]: """simple docstring""" a = [self.sep_token_id] a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __lowerCAmelCase ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ) ->Tuple[str]: """simple docstring""" a = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase ) return tuple(__UpperCAmelCase )
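# Added illustration: the segment-id arithmetic in
# create_token_type_ids_from_sequences above, reproduced standalone
# (the token ids below are illustrative BERT-style values):
cls_id, sep_id = 101, 102
token_ids_0 = [7592, 2088]        # e.g. "hello world"
token_ids_1 = [2129, 2024, 2017]  # e.g. "how are you"

cls, sep = [cls_id], [sep_id]
single = len(cls + token_ids_0 + sep) * [0]
pair = len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

assert single == [0, 0, 0, 0]
assert pair == [0, 0, 0, 0, 1, 1, 1, 1]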
26
1
import logging import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional, Union import datasets import numpy as np import torch from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.trainer_utils import get_last_checkpoint from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") A : Tuple = logging.getLogger(__name__) @dataclass class _lowercase : """simple docstring""" A__ = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}) A__ = field( default=lowercase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"}) A__ = field( default=lowercase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}) A__ = field( default=lowercase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) A__ = field( default=lowercase__ , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , ) A__ = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) A__ = field( default=lowercase__ , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) } , ) @dataclass class _lowercase : """simple docstring""" A__ = field(default=lowercase__ , metadata={"help": "The input training data file (a text file)."}) A__ = field( default=lowercase__ , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , ) A__ = field( default=lowercase__ , metadata={"help": "Overwrite the cached training and evaluation sets"}) A__ = field( default=lowercase__ , metadata={"help": "The number of processes to use for the preprocessing."} , ) A__ = field( default=lowercase__ , metadata={ "help": ( "The maximum total input sequence length after tokenization. If passed, sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) A__ = field( default=lowercase__ , metadata={ "help": ( "Whether to pad all samples to the maximum sentence length. " "If False, will pad the samples dynamically when batching to the maximum length in the batch. More " "efficient on GPU but very bad for TPU." ) } , ) A__ = field( default=lowercase__ , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } , ) A__ = field( default=lowercase__ , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) } , ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' if self.train_file is not None: lowerCamelCase__ : Optional[int] = self.train_file.split("." )[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: lowerCamelCase__ : Tuple = self.validation_file.split("." 
)[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." @dataclass class _lowercase : """simple docstring""" A__ = 42 A__ = True A__ = None A__ = None def __call__( self : Optional[int] , __lowerCamelCase : Tuple ): '''simple docstring''' lowerCamelCase__ : Dict = "label" if "label" in features[0].keys() else "labels" lowerCamelCase__ : Optional[int] = [feature.pop(__lowerCamelCase ) for feature in features] lowerCamelCase__ : Dict = len(__lowerCamelCase ) lowerCamelCase__ : List[str] = len(features[0]["input_ids"] ) lowerCamelCase__ : int = [ [{k: v[i] for k, v in feature.items()} for i in range(__lowerCamelCase )] for feature in features ] lowerCamelCase__ : Dict = list(chain(*__lowerCamelCase ) ) lowerCamelCase__ : Dict = self.tokenizer.pad( __lowerCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , ) # Un-flatten lowerCamelCase__ : Dict = {k: v.view(__lowerCamelCase , __lowerCamelCase , -1 ) for k, v in batch.items()} # Add back labels lowerCamelCase__ : Any = torch.tensor(__lowerCamelCase , dtype=torch.intaa ) return batch def lowercase_ ( ): """simple docstring""" lowerCamelCase__ : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_swag" , _A , _A ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() lowerCamelCase__ : int = training_args.get_process_log_level() logger.setLevel(_A ) datasets.utils.logging.set_verbosity(_A ) transformers.utils.logging.set_verbosity(_A ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" ) logger.info(F"Training/evaluation parameters {training_args}" ) # Detecting last checkpoint. lowerCamelCase__ : str = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: lowerCamelCase__ : Tuple = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." 
) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.train_file is not None or data_args.validation_file is not None: lowerCamelCase__ : Optional[int] = {} if data_args.train_file is not None: lowerCamelCase__ : Optional[int] = data_args.train_file if data_args.validation_file is not None: lowerCamelCase__ : Optional[Any] = data_args.validation_file lowerCamelCase__ : Any = data_args.train_file.split("." )[-1] lowerCamelCase__ : Optional[Any] = load_dataset( _A , data_files=_A , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) else: # Downloading and loading the swag dataset from the hub. lowerCamelCase__ : Any = load_dataset( "swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. lowerCamelCase__ : Union[str, Any] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) lowerCamelCase__ : Tuple = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) lowerCamelCase__ : Optional[Any] = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_A , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # When using your own dataset or a different dataset from swag, you will probably need to change this. lowerCamelCase__ : List[Any] = [F"ending{i}" for i in range(4 )] lowerCamelCase__ : str = "sent1" lowerCamelCase__ : Tuple = "sent2" if data_args.max_seq_length is None: lowerCamelCase__ : Dict = tokenizer.model_max_length if max_seq_length > 1024: logger.warning( "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value" " of 1024. 
If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can" " override this default with `--block_size xxx`." ) lowerCamelCase__ : Optional[Any] = 1024 else: if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the" F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." ) lowerCamelCase__ : List[str] = min(data_args.max_seq_length , tokenizer.model_max_length ) # Preprocessing the datasets. def preprocess_function(_A : Any ): lowerCamelCase__ : Optional[Any] = [[context] * 4 for context in examples[context_name]] lowerCamelCase__ : str = examples[question_header_name] lowerCamelCase__ : Any = [ [F"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(_A ) ] # Flatten out lowerCamelCase__ : Any = list(chain(*_A ) ) lowerCamelCase__ : Optional[int] = list(chain(*_A ) ) # Tokenize lowerCamelCase__ : Union[str, Any] = tokenizer( _A , _A , truncation=_A , max_length=_A , padding="max_length" if data_args.pad_to_max_length else False , ) # Un-flatten return {k: [v[i : i + 4] for i in range(0 , len(_A ) , 4 )] for k, v in tokenized_examples.items()} if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset" ) lowerCamelCase__ : int = raw_datasets["train"] if data_args.max_train_samples is not None: lowerCamelCase__ : List[str] = min(len(_A ) , data_args.max_train_samples ) lowerCamelCase__ : Union[str, Any] = train_dataset.select(range(_A ) ) with training_args.main_process_first(desc="train dataset map pre-processing" ): lowerCamelCase__ : int = train_dataset.map( _A , batched=_A , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset" ) lowerCamelCase__ : Tuple = raw_datasets["validation"] if data_args.max_eval_samples is not None: lowerCamelCase__ : str = min(len(_A ) , data_args.max_eval_samples ) lowerCamelCase__ : Optional[Any] = eval_dataset.select(range(_A ) ) with training_args.main_process_first(desc="validation dataset map pre-processing" ): lowerCamelCase__ : Any = eval_dataset.map( _A , batched=_A , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) # Data collator lowerCamelCase__ : Any = ( default_data_collator if data_args.pad_to_max_length else DataCollatorForMultipleChoice(tokenizer=_A , pad_to_multiple_of=8 if training_args.fpaa else None ) ) # Metric def compute_metrics(_A : List[str] ): lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = eval_predictions lowerCamelCase__ : str = np.argmax(_A , axis=1 ) return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()} # Initialize our Trainer lowerCamelCase__ : Optional[int] = Trainer( model=_A , args=_A , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=_A , data_collator=_A , compute_metrics=_A , ) # Training if training_args.do_train: lowerCamelCase__ : List[str] = None if training_args.resume_from_checkpoint is not None: lowerCamelCase__ : Any = training_args.resume_from_checkpoint elif last_checkpoint is not None: lowerCamelCase__ : List[str] = last_checkpoint lowerCamelCase__ : Any = trainer.train(resume_from_checkpoint=_A ) 
trainer.save_model() # Saves the tokenizer too for easy upload lowerCamelCase__ : int = train_result.metrics lowerCamelCase__ : List[Any] = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(_A ) ) lowerCamelCase__ : Any = min(_A , len(_A ) ) trainer.log_metrics("train" , _A ) trainer.save_metrics("train" , _A ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***" ) lowerCamelCase__ : str = trainer.evaluate() lowerCamelCase__ : Union[str, Any] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_A ) lowerCamelCase__ : Tuple = min(_A , len(_A ) ) trainer.log_metrics("eval" , _A ) trainer.save_metrics("eval" , _A ) lowerCamelCase__ : Optional[int] = { "finetuned_from": model_args.model_name_or_path, "tasks": "multiple-choice", "dataset_tags": "swag", "dataset_args": "regular", "dataset": "SWAG", "language": "en", } if training_args.push_to_hub: trainer.push_to_hub(**_A ) else: trainer.create_model_card(**_A ) def lowercase_ ( _A : List[Any] ): """simple docstring""" main() if __name__ == "__main__": main()
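# Added illustration: the compute_metrics closure above reduces to an argmax
# accuracy over the four answer candidates; a standalone numpy check with
# made-up logits:
import numpy as np

predictions = np.array([[0.1, 0.7, 0.1, 0.1], [0.6, 0.2, 0.1, 0.1]])
label_ids = np.array([1, 0])

preds = np.argmax(predictions, axis=1)
accuracy = (preds == label_ids).astype(np.float64).mean().item()
assert accuracy == 1.0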
184
import datasets from .evaluate import evaluate A : Dict = "\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n" A : Any = "\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n" A : str = "\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the CUAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\n 'aupr': Area Under the Precision-Recall curve\n 'prec_at_80_recall': Precision at 80% recall\n 'prec_at_90_recall': Precision at 90% recall\nExamples:\n >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> cuad_metric = datasets.load_metric(\"cuad\")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class _lowercase ( datasets.Metric): """simple docstring""" def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": { "id": datasets.Value("string" ), "prediction_text": datasets.features.Sequence(datasets.Value("string" ) ), }, "references": { "id": datasets.Value("string" ), "answers": datasets.features.Sequence( { "text": datasets.Value("string" ), "answer_start": datasets.Value("int32" ), } ), }, } ) , codebase_urls=["https://www.atticusprojectai.org/cuad"] , reference_urls=["https://www.atticusprojectai.org/cuad"] , ) def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Dict ): '''simple docstring''' lowerCamelCase__ : int 
= {prediction["id"]: prediction["prediction_text"] for prediction in predictions} lowerCamelCase__ : Any = [ { "paragraphs": [ { "qas": [ { "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]], "id": ref["id"], } for ref in references ] } ] } ] lowerCamelCase__ : Optional[int] = evaluate(dataset=__lowerCamelCase , predictions=__lowerCamelCase ) return score
184
1
from __future__ import annotations


class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list: list) -> Node:
    """Build a linked list from the given elements and return its head."""
    if not elements_list:
        raise Exception("The Elements List is empty")

    head = Node(elements_list[0])
    current = head
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node: Node) -> None:
    """Print the elements of the linked list in reverse order."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod

    testmod()

    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
362
def solution(n: int = 1_000) -> int:
    """
    Count the fractions among the first `n` expansions of the continued
    fraction for sqrt(2) whose numerator has more digits than the
    denominator (Project Euler problem 57).
    """
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator

    return len(result)


if __name__ == "__main__":
    print(f"{solution() = }")
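# Added sanity check: iterating the recurrence above, the 8th expansion of
# sqrt(2) is 1393/985 -- the first whose numerator has more digits than its
# denominator -- so solution(8) should be 1:
num, den = 1, 1
expansions = []
for _ in range(8):
    num, den = num + 2 * den, num + den
    expansions.append((num, den))

assert expansions[-1] == (1393, 985)
assert sum(len(str(n)) > len(str(d)) for n, d in expansions) == 1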
121
0
"""simple docstring""" import random class lowercase : @staticmethod def _snake_case ( lowercase ) -> tuple[list[int], list[int]]: lowerCAmelCase = [ord(lowercase ) for i in text] lowerCAmelCase = [] lowerCAmelCase = [] for i in plain: lowerCAmelCase = random.randint(1 , 300 ) lowerCAmelCase = (i + k) * k cipher.append(lowercase ) key.append(lowercase ) return cipher, key @staticmethod def _snake_case ( lowercase , lowercase ) -> str: lowerCAmelCase = [] for i in range(len(lowercase ) ): lowerCAmelCase = int((cipher[i] - (key[i]) ** 2) / key[i] ) plain.append(chr(lowercase ) ) return "".join(lowercase ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ = Onepad().encrypt("Hello") print(c, k) print(Onepad().decrypt(c, k))
46
'''simple docstring''' import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer UpperCamelCase__ : Optional[int] = logging.get_logger(__name__) UpperCamelCase__ : Any = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} UpperCamelCase__ : Union[str, Any] = { '''vocab_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } UpperCamelCase__ : Optional[int] = { '''vocab_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } UpperCamelCase__ : Optional[Any] = { '''vocab_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json''' ), }, } UpperCamelCase__ : Any = { '''facebook/dpr-ctx_encoder-single-nq-base''': 5_12, '''facebook/dpr-ctx_encoder-multiset-base''': 5_12, } UpperCamelCase__ : Optional[Any] = { '''facebook/dpr-question_encoder-single-nq-base''': 5_12, '''facebook/dpr-question_encoder-multiset-base''': 5_12, } UpperCamelCase__ : Dict = { '''facebook/dpr-reader-single-nq-base''': 5_12, '''facebook/dpr-reader-multiset-base''': 5_12, } UpperCamelCase__ : Optional[int] = { '''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True}, } UpperCamelCase__ : Optional[Any] = { '''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True}, } UpperCamelCase__ : Any = { '''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True}, } class 
_UpperCamelCase ( lowerCamelCase__ ): '''simple docstring''' _A : str = VOCAB_FILES_NAMES _A : str = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP _A : Any = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _A : Optional[Any] = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION _A : Any = DPRContextEncoderTokenizer class _UpperCamelCase ( lowerCamelCase__ ): '''simple docstring''' _A : Dict = VOCAB_FILES_NAMES _A : Any = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP _A : Tuple = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _A : Union[str, Any] = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION _A : Dict = DPRQuestionEncoderTokenizer UpperCamelCase__ : str = collections.namedtuple( '''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text'''] ) UpperCamelCase__ : List[Any] = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits''']) UpperCamelCase__ : Any = R''' Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. 
              This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is
              provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output a batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to
            `None`, this will use the predefined model maximum length if a maximum length is required by one of the
            truncation/padding parameters. If the model has no specific maximum input length (like XLNet),
            truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of lists of Python integers. Acceptable values are:
            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return NumPy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.
            [What are attention masks?](../glossary#attention-mask)
    Return:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:
        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    '''


@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
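For orientation, a minimal end-to-end sketch of how the reader tokenizer above is meant to be used together with `DPRReader` (the public facebook/dpr-reader-single-nq-base checkpoint; the exact answer text depends on the model):

from transformers import DPRReader, DPRReaderTokenizerFast

tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
encoded_inputs = tokenizer(
    questions=["What is love?"],
    titles=["Haddaway"],
    texts=["'What Is Love' is a song recorded by the artist Haddaway"],
    return_tensors="pt",
)
outputs = model(**encoded_inputs)
# decode_best_spans sorts passages by relevance logit and extracts the top spans
predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
print(predicted_spans[0].text)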
112
0
import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot import BlenderbotTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowercase_ = logging.get_logger(__name__) lowercase_ = { """vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_config_file""": """tokenizer_config.json""", } lowercase_ = { """vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""}, """merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""}, """tokenizer_config_file""": { """facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json""" }, } lowercase_ = {"""facebook/blenderbot-3B""": 128} class _snake_case ( lowercase__): UpperCamelCase__ : Any =VOCAB_FILES_NAMES UpperCamelCase__ : Tuple =PRETRAINED_VOCAB_FILES_MAP UpperCamelCase__ : Optional[int] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase__ : str =["""input_ids""", """attention_mask"""] UpperCamelCase__ : int =BlenderbotTokenizer def __init__( self : Any, __lowercase : Optional[Any]=None, __lowercase : int=None, __lowercase : Optional[Any]=None, __lowercase : Union[str, Any]="replace", __lowercase : Optional[Any]="<s>", __lowercase : List[str]="</s>", __lowercase : int="</s>", __lowercase : Optional[int]="<s>", __lowercase : Any="<unk>", __lowercase : int="<pad>", __lowercase : Any="<mask>", __lowercase : List[str]=False, __lowercase : str=True, **__lowercase : Optional[int], ): super().__init__( __lowercase, __lowercase, tokenizer_file=__lowercase, errors=__lowercase, bos_token=__lowercase, eos_token=__lowercase, sep_token=__lowercase, cls_token=__lowercase, unk_token=__lowercase, pad_token=__lowercase, mask_token=__lowercase, add_prefix_space=__lowercase, trim_offsets=__lowercase, **__lowercase, ) lowercase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space", __lowercase ) != add_prefix_space: lowercase__ = getattr(__lowercase, pre_tok_state.pop("type" ) ) lowercase__ = add_prefix_space lowercase__ = pre_tok_class(**__lowercase ) lowercase__ = add_prefix_space lowercase__ = "post_processor" lowercase__ = getattr(self.backend_tokenizer, __lowercase, __lowercase ) if tokenizer_component_instance: lowercase__ = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowercase__ = tuple(state["sep"] ) if "cls" in state: lowercase__ = tuple(state["cls"] ) lowercase__ = False if state.get("add_prefix_space", __lowercase ) != add_prefix_space: lowercase__ = add_prefix_space lowercase__ = True if state.get("trim_offsets", __lowercase ) != trim_offsets: lowercase__ = trim_offsets lowercase__ = True if changes_to_apply: lowercase__ = getattr(__lowercase, state.pop("type" ) ) lowercase__ = component_class(**__lowercase ) setattr(self.backend_tokenizer, __lowercase, __lowercase ) @property # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot def A__ ( self : int ): if self._mask_token is None: if self.verbose: 
logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def A__ ( self : Any, __lowercase : Union[str, Any] ): lowercase__ = AddedToken(__lowercase, lstrip=__lowercase, rstrip=__lowercase ) if isinstance(__lowercase, __lowercase ) else value lowercase__ = value def A__ ( self : Any, *__lowercase : List[str], **__lowercase : List[str] ): lowercase__ = kwargs.get("is_split_into_words", __lowercase ) assert self.add_prefix_space or not is_split_into_words, ( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__lowercase, **__lowercase ) def A__ ( self : int, *__lowercase : Optional[Any], **__lowercase : Any ): lowercase__ = kwargs.get("is_split_into_words", __lowercase ) assert self.add_prefix_space or not is_split_into_words, ( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*__lowercase, **__lowercase ) def A__ ( self : Any, __lowercase : str, __lowercase : Optional[str] = None ): lowercase__ = self._tokenizer.model.save(__lowercase, name=__lowercase ) return tuple(__lowercase ) def A__ ( self : List[str], __lowercase : List[int], __lowercase : Optional[List[int]] = None ): lowercase__ = [self.sep_token_id] lowercase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def A__ ( self : Optional[Any], __lowercase : List[int], __lowercase : Optional[List[int]] = None ): return token_ids_a + [self.eos_token_id] def A__ ( self : List[Any], __lowercase : "Conversation" ): lowercase__ = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(" " + text ) else: # Generated responses should contain them already. inputs.append(__lowercase ) lowercase__ = " ".join(__lowercase ) lowercase__ = self.encode(__lowercase ) if len(__lowercase ) > self.model_max_length: lowercase__ = input_ids[-self.model_max_length :] logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' ) return input_ids
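A quick usage sketch for the fast Blenderbot tokenizer above (the facebook/blenderbot-3B checkpoint referenced in the vocab maps; note that `build_inputs_with_special_tokens` only appends the EOS token and adds no BOS):

from transformers import BlenderbotTokenizerFast

tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
inputs = tokenizer("My friends are cool but they eat too many carbs.", return_tensors="pt")
print(tokenizer.decode(inputs["input_ids"][0]))  # the sequence ends with the </s> EOS marker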
224
def binary_recursive(decimal: int) -> str:
    """Return the binary representation of a non-negative integer, recursively."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Validate the input string and prefix the result with the sign and '0b'."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"


if __name__ == "__main__":
    from doctest import testmod

    testmod()
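A few sanity checks for the reconstructed functions (the names binary_recursive and main follow the calls visible in the original body):

assert binary_recursive(2) == "10"
assert binary_recursive(7) == "111"
assert main("0") == "0b0"
assert main("-28") == "-0b11100"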
224
1
from __future__ import annotations

import time

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right


class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent))
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print("Bidirectional BFS computation time : ", bd_bfs_time)
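One design note on the queues above: `list.pop(0)` is O(n) per pop, so for larger grids a `collections.deque` keeps the same FIFO behaviour at O(1). A minimal sketch of the swap:

from collections import deque

node_queue = deque(["start_node"])
node_queue.append("successor")
current = node_queue.popleft()  # replaces node_queue.pop(0)
assert current == "start_node"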
73
from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Base model mapping ("""albert""", """FlaxAlbertModel"""), ("""bart""", """FlaxBartModel"""), ("""beit""", """FlaxBeitModel"""), ("""bert""", """FlaxBertModel"""), ("""big_bird""", """FlaxBigBirdModel"""), ("""blenderbot""", """FlaxBlenderbotModel"""), ("""blenderbot-small""", """FlaxBlenderbotSmallModel"""), ("""clip""", """FlaxCLIPModel"""), ("""distilbert""", """FlaxDistilBertModel"""), ("""electra""", """FlaxElectraModel"""), ("""gpt-sw3""", """FlaxGPT2Model"""), ("""gpt2""", """FlaxGPT2Model"""), ("""gpt_neo""", """FlaxGPTNeoModel"""), ("""gptj""", """FlaxGPTJModel"""), ("""longt5""", """FlaxLongT5Model"""), ("""marian""", """FlaxMarianModel"""), ("""mbart""", """FlaxMBartModel"""), ("""mt5""", """FlaxMT5Model"""), ("""opt""", """FlaxOPTModel"""), ("""pegasus""", """FlaxPegasusModel"""), ("""regnet""", """FlaxRegNetModel"""), ("""resnet""", """FlaxResNetModel"""), ("""roberta""", """FlaxRobertaModel"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""), ("""roformer""", """FlaxRoFormerModel"""), ("""t5""", """FlaxT5Model"""), ("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""), ("""vit""", """FlaxViTModel"""), ("""wav2vec2""", """FlaxWav2Vec2Model"""), ("""whisper""", """FlaxWhisperModel"""), ("""xglm""", """FlaxXGLMModel"""), ("""xlm-roberta""", """FlaxXLMRobertaModel"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for pre-training mapping ("""albert""", """FlaxAlbertForPreTraining"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForPreTraining"""), ("""big_bird""", """FlaxBigBirdForPreTraining"""), ("""electra""", """FlaxElectraForPreTraining"""), ("""longt5""", """FlaxLongT5ForConditionalGeneration"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Masked LM mapping ("""albert""", """FlaxAlbertForMaskedLM"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForMaskedLM"""), ("""big_bird""", """FlaxBigBirdForMaskedLM"""), ("""distilbert""", """FlaxDistilBertForMaskedLM"""), ("""electra""", """FlaxElectraForMaskedLM"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ("""bart""", """FlaxBartForConditionalGeneration"""), ("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""), ("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""), ("""encoder-decoder""", """FlaxEncoderDecoderModel"""), ("""longt5""", """FlaxLongT5ForConditionalGeneration"""), ("""marian""", 
"""FlaxMarianMTModel"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""pegasus""", """FlaxPegasusForConditionalGeneration"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Image-classsification ("""beit""", """FlaxBeitForImageClassification"""), ("""regnet""", """FlaxRegNetForImageClassification"""), ("""resnet""", """FlaxResNetForImageClassification"""), ("""vit""", """FlaxViTForImageClassification"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ ("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Causal LM mapping ("""bart""", """FlaxBartForCausalLM"""), ("""bert""", """FlaxBertForCausalLM"""), ("""big_bird""", """FlaxBigBirdForCausalLM"""), ("""electra""", """FlaxElectraForCausalLM"""), ("""gpt-sw3""", """FlaxGPT2LMHeadModel"""), ("""gpt2""", """FlaxGPT2LMHeadModel"""), ("""gpt_neo""", """FlaxGPTNeoForCausalLM"""), ("""gptj""", """FlaxGPTJForCausalLM"""), ("""opt""", """FlaxOPTForCausalLM"""), ("""roberta""", """FlaxRobertaForCausalLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""), ("""xglm""", """FlaxXGLMForCausalLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Sequence Classification mapping ("""albert""", """FlaxAlbertForSequenceClassification"""), ("""bart""", """FlaxBartForSequenceClassification"""), ("""bert""", """FlaxBertForSequenceClassification"""), ("""big_bird""", """FlaxBigBirdForSequenceClassification"""), ("""distilbert""", """FlaxDistilBertForSequenceClassification"""), ("""electra""", """FlaxElectraForSequenceClassification"""), ("""mbart""", """FlaxMBartForSequenceClassification"""), ("""roberta""", """FlaxRobertaForSequenceClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""), ("""roformer""", """FlaxRoFormerForSequenceClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Question Answering mapping ("""albert""", """FlaxAlbertForQuestionAnswering"""), ("""bart""", """FlaxBartForQuestionAnswering"""), ("""bert""", """FlaxBertForQuestionAnswering"""), ("""big_bird""", """FlaxBigBirdForQuestionAnswering"""), ("""distilbert""", """FlaxDistilBertForQuestionAnswering"""), ("""electra""", """FlaxElectraForQuestionAnswering"""), ("""mbart""", """FlaxMBartForQuestionAnswering"""), ("""roberta""", """FlaxRobertaForQuestionAnswering"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""), ("""roformer""", """FlaxRoFormerForQuestionAnswering"""), ("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Token Classification mapping ("""albert""", """FlaxAlbertForTokenClassification"""), ("""bert""", """FlaxBertForTokenClassification"""), ("""big_bird""", """FlaxBigBirdForTokenClassification"""), ("""distilbert""", """FlaxDistilBertForTokenClassification"""), ("""electra""", """FlaxElectraForTokenClassification"""), ("""roberta""", """FlaxRobertaForTokenClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""), ("""roformer""", """FlaxRoFormerForTokenClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ # Model for Multiple Choice mapping 
("""albert""", """FlaxAlbertForMultipleChoice"""), ("""bert""", """FlaxBertForMultipleChoice"""), ("""big_bird""", """FlaxBigBirdForMultipleChoice"""), ("""distilbert""", """FlaxDistilBertForMultipleChoice"""), ("""electra""", """FlaxElectraForMultipleChoice"""), ("""roberta""", """FlaxRobertaForMultipleChoice"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""), ("""roformer""", """FlaxRoFormerForMultipleChoice"""), ("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ ("""bert""", """FlaxBertForNextSentencePrediction"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ ("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ] ) SCREAMING_SNAKE_CASE_ = OrderedDict( [ ("""whisper""", """FlaxWhisperForAudioClassification"""), ] ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) SCREAMING_SNAKE_CASE_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : List[str] = FLAX_MODEL_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModel) class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Dict = FLAX_MODEL_FOR_PRETRAINING_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""") class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Optional[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""") class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Any = FLAX_MODEL_FOR_MASKED_LM_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""") class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : int = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc="""sequence-to-sequence 
language modeling""", checkpoint_for_example="""t5-base""" ) class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Optional[int] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc="""sequence classification""" ) class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : List[Any] = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""") class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Tuple = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update( FlaxAutoModelForTokenClassification, head_doc="""token classification""" ) class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Union[str, Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""") class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : List[str] = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction""" ) class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Dict = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update( FlaxAutoModelForImageClassification, head_doc="""image classification""" ) class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Dict = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="""vision-to-text modeling""") class UpperCamelCase__ ( _BaseAutoModelClass ): '''simple docstring''' __snake_case : Optional[Any] = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING SCREAMING_SNAKE_CASE_ = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc="""sequence-to-sequence speech-to-text modeling""" )
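Resolution through these lazy mappings is the usual auto-class pattern; a small sketch (checkpoint chosen for illustration and assumed to ship Flax weights):

from transformers import FlaxAutoModel

model = FlaxAutoModel.from_pretrained("bert-base-cased")  # resolved via FLAX_MODEL_MAPPING to FlaxBertModel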
296
0
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { 'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json', 'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json', 'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json', 'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json', 'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json', 'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json', } class lowerCAmelCase__ ( A_ ): __a = """roberta""" def __init__( self : str , _lowerCamelCase : Optional[int]=50265 , _lowerCamelCase : str=768 , _lowerCamelCase : Optional[int]=12 , _lowerCamelCase : int=12 , _lowerCamelCase : List[str]=3072 , _lowerCamelCase : str="gelu" , _lowerCamelCase : Optional[int]=0.1 , _lowerCamelCase : Optional[int]=0.1 , _lowerCamelCase : Dict=512 , _lowerCamelCase : Any=2 , _lowerCamelCase : Optional[int]=0.0_2 , _lowerCamelCase : Union[str, Any]=1e-12 , _lowerCamelCase : Optional[int]=1 , _lowerCamelCase : Any=0 , _lowerCamelCase : Any=2 , _lowerCamelCase : List[str]="absolute" , _lowerCamelCase : List[str]=True , _lowerCamelCase : Any=None , **_lowerCamelCase : Dict , ): super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ ) _snake_case = vocab_size _snake_case = hidden_size _snake_case = num_hidden_layers _snake_case = num_attention_heads _snake_case = hidden_act _snake_case = intermediate_size _snake_case = hidden_dropout_prob _snake_case = attention_probs_dropout_prob _snake_case = max_position_embeddings _snake_case = type_vocab_size _snake_case = initializer_range _snake_case = layer_norm_eps _snake_case = position_embedding_type _snake_case = use_cache _snake_case = classifier_dropout class lowerCAmelCase__ ( A_ ): @property def lowercase ( self : Optional[int] ): if self.task == "multiple-choice": _snake_case = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: _snake_case = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
368
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, is_vision_available, ) UpperCAmelCase__ = {'processing_layoutxlm': ['LayoutXLMProcessor']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = ['LayoutXLMTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = ['LayoutXLMTokenizerFast'] if TYPE_CHECKING: from .processing_layoutxlm import LayoutXLMProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm import LayoutXLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast else: import sys UpperCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
40
0
import warnings from ...utils import is_sklearn_available, requires_backends if is_sklearn_available(): from scipy.stats import pearsonr, spearmanr from sklearn.metrics import fa_score, matthews_corrcoef __UpperCAmelCase : Optional[Any] = ( "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate " "library. You can have a look at this example script for pointers: " "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py" ) def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> List[str]: warnings.warn(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) requires_backends(SCREAMING_SNAKE_CASE__ , """sklearn""") return (preds == labels).mean() def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> Optional[Any]: warnings.warn(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) requires_backends(SCREAMING_SNAKE_CASE__ , """sklearn""") __snake_case: Union[str, Any] = simple_accuracy(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) __snake_case: List[str] = fa_score(y_true=SCREAMING_SNAKE_CASE__ , y_pred=SCREAMING_SNAKE_CASE__) return { "acc": acc, "f1": fa, "acc_and_f1": (acc + fa) / 2, } def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> str: warnings.warn(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) requires_backends(SCREAMING_SNAKE_CASE__ , """sklearn""") __snake_case: Union[str, Any] = pearsonr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)[0] __snake_case: Tuple = spearmanr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)[0] return { "pearson": pearson_corr, "spearmanr": spearman_corr, "corr": (pearson_corr + spearman_corr) / 2, } def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> List[Any]: warnings.warn(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) requires_backends(SCREAMING_SNAKE_CASE__ , """sklearn""") assert len(SCREAMING_SNAKE_CASE__) == len(SCREAMING_SNAKE_CASE__), F'''Predictions and labels have mismatched lengths {len(SCREAMING_SNAKE_CASE__)} and {len(SCREAMING_SNAKE_CASE__)}''' if task_name == "cola": return {"mcc": matthews_corrcoef(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)} elif task_name == "sst-2": return {"acc": simple_accuracy(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)} elif task_name == "mrpc": return acc_and_fa(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) elif task_name == "sts-b": return pearson_and_spearman(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) elif task_name == "qqp": return acc_and_fa(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) elif task_name == "mnli": return {"mnli/acc": simple_accuracy(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)} elif task_name == "mnli-mm": return {"mnli-mm/acc": simple_accuracy(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)} elif task_name == "qnli": return {"acc": simple_accuracy(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)} elif task_name == "rte": return {"acc": simple_accuracy(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)} elif task_name == "wnli": return {"acc": simple_accuracy(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)} elif task_name == "hans": return {"acc": simple_accuracy(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)} else: raise KeyError(SCREAMING_SNAKE_CASE__) def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> str: warnings.warn(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) requires_backends(SCREAMING_SNAKE_CASE__ , """sklearn""") if len(SCREAMING_SNAKE_CASE__) != 
len(SCREAMING_SNAKE_CASE__): raise ValueError(F'''Predictions and labels have mismatched lengths {len(SCREAMING_SNAKE_CASE__)} and {len(SCREAMING_SNAKE_CASE__)}''') if task_name == "xnli": return {"acc": simple_accuracy(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)} else: raise KeyError(SCREAMING_SNAKE_CASE__)
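With the canonical names from transformers.data.metrics (simple_accuracy, acc_and_f1, pearson_and_spearman, glue_compute_metrics, xnli_compute_metrics; the definitions above are mangled), usage looks like:

import numpy as np
from transformers.data.metrics import glue_compute_metrics

preds = np.array([1, 0, 1, 1])
labels = np.array([1, 0, 0, 1])
print(glue_compute_metrics("mrpc", preds, labels))
# {'acc': 0.75, 'f1': 0.8, 'acc_and_f1': 0.775}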
111
import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin class __snake_case ( unittest.TestCase , __lowerCamelCase ): '''simple docstring''' def UpperCAmelCase__ ( self : Union[str, Any] ): __snake_case: Optional[int] = load_tool("""text-classification""" ) self.tool.setup() __snake_case: Dict = load_tool("""text-classification""" , remote=A ) def UpperCAmelCase__ ( self : Optional[int] ): __snake_case: List[str] = self.tool("""That's quite cool""" , ["""positive""", """negative"""] ) self.assertEqual(A , """positive""" ) def UpperCAmelCase__ ( self : Dict ): __snake_case: List[str] = self.remote_tool("""That's quite cool""" , ["""positive""", """negative"""] ) self.assertEqual(A , """positive""" ) def UpperCAmelCase__ ( self : Optional[Any] ): __snake_case: Optional[Any] = self.tool(text="""That's quite cool""" , labels=["""positive""", """negative"""] ) self.assertEqual(A , """positive""" ) def UpperCAmelCase__ ( self : Tuple ): __snake_case: int = self.remote_tool(text="""That's quite cool""" , labels=["""positive""", """negative"""] ) self.assertEqual(A , """positive""" )
111
1
from string import ascii_uppercase

# letter -> index and index -> letter lookup tables
dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Repeat the key until it matches the length of the message."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt by subtracting the key letter from the message letter, mod 26."""
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher_text += dict2[x]
    return cipher_text


def original_text(cipher_text: str, key_new: str) -> str:
    """Decrypt by adding the key letter back, mod 26."""
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
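A round-trip check for the scheme (decryption adds back exactly what encryption subtracted, mod 26):

key_new = generate_key("DEFEND THE CASTLE", "LOCK")
encrypted = cipher_text("DEFEND THE CASTLE", key_new)
assert original_text(encrypted, key_new) == "DEFEND THE CASTLE"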
29
import random import unittest import torch from diffusers import IFImgaImgSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , unittest.TestCase): UpperCAmelCase__ : Union[str, Any] = IFImgaImgSuperResolutionPipeline UpperCAmelCase__ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'width', 'height'} UpperCAmelCase__ : Dict = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'original_image'}) UpperCAmelCase__ : Tuple = PipelineTesterMixin.required_optional_params - {'latents'} def lowerCAmelCase__ ( self: Optional[int] ): return self._get_superresolution_dummy_components() def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Any , UpperCamelCase_: Dict=0 ): if str(UpperCamelCase_ ).startswith("""mps""" ): __lowerCamelCase = torch.manual_seed(UpperCamelCase_ ) else: __lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ ) __lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ ) __lowerCamelCase = floats_tensor((1, 3, 16, 16) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ ) __lowerCamelCase = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """original_image""": original_image, """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def lowerCAmelCase__ ( self: Dict ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def lowerCAmelCase__ ( self: int ): self._test_save_load_optional_components() @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" ) def lowerCAmelCase__ ( self: Optional[Any] ): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def lowerCAmelCase__ ( self: Optional[Any] ): self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def lowerCAmelCase__ ( self: List[str] ): self._test_save_load_local() def lowerCAmelCase__ ( self: List[Any] ): self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
29
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase = logging.get_logger(__name__) __lowerCAmelCase = { '''SCUT-DLVCLab/lilt-roberta-en-base''': ( '''https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json''' ), } class __magic_name__ ( _UpperCamelCase ): lowerCAmelCase : Tuple = 'lilt' def __init__( self : Optional[int] ,_UpperCAmelCase : int=30522 ,_UpperCAmelCase : Dict=768 ,_UpperCAmelCase : Dict=12 ,_UpperCAmelCase : str=12 ,_UpperCAmelCase : Dict=3072 ,_UpperCAmelCase : List[str]="gelu" ,_UpperCAmelCase : Optional[int]=0.1 ,_UpperCAmelCase : Tuple=0.1 ,_UpperCAmelCase : Dict=512 ,_UpperCAmelCase : Union[str, Any]=2 ,_UpperCAmelCase : Any=0.02 ,_UpperCAmelCase : Tuple=1E-12 ,_UpperCAmelCase : List[str]=0 ,_UpperCAmelCase : Union[str, Any]="absolute" ,_UpperCAmelCase : Any=None ,_UpperCAmelCase : Optional[Any]=4 ,_UpperCAmelCase : int=1024 ,**_UpperCAmelCase : Union[str, Any] ,): super().__init__(pad_token_id=_UpperCAmelCase ,**_UpperCAmelCase ) _a : Any = vocab_size _a : List[str] = hidden_size _a : Any = num_hidden_layers _a : int = num_attention_heads _a : Optional[int] = hidden_act _a : int = intermediate_size _a : Optional[Any] = hidden_dropout_prob _a : List[Any] = attention_probs_dropout_prob _a : Optional[int] = max_position_embeddings _a : Tuple = type_vocab_size _a : str = initializer_range _a : List[str] = layer_norm_eps _a : Optional[Any] = position_embedding_type _a : Any = classifier_dropout _a : Union[str, Any] = channel_shrink_ratio _a : int = max_ad_position_embeddings
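A quick instantiation sketch (the canonical class name in transformers is LiltConfig; the identifiers above are mangled):

from transformers import LiltConfig

config = LiltConfig()  # defaults as listed above
print(config.hidden_size, config.channel_shrink_ratio, config.max_2d_position_embeddings)  # 768 4 1024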
89
'''simple docstring''' import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class __magic_name__ ( _UpperCamelCase ): def __init__( self : Optional[int] ,_UpperCAmelCase : Union[str, "sqlalchemy.sql.Selectable"] ,_UpperCAmelCase : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] ,_UpperCAmelCase : Optional[Features] = None ,_UpperCAmelCase : str = None ,_UpperCAmelCase : bool = False ,**_UpperCAmelCase : Dict ,): super().__init__(features=_UpperCAmelCase ,cache_dir=_UpperCAmelCase ,keep_in_memory=_UpperCAmelCase ,**_UpperCAmelCase ) _a : Tuple = Sql( cache_dir=_UpperCAmelCase ,features=_UpperCAmelCase ,sql=_UpperCAmelCase ,con=_UpperCAmelCase ,**_UpperCAmelCase ,) def __lowercase ( self : Dict ): _a : Optional[Any] = None _a : Dict = None _a : Dict = None _a : Optional[int] = None self.builder.download_and_prepare( download_config=_UpperCAmelCase ,download_mode=_UpperCAmelCase ,verification_mode=_UpperCAmelCase ,base_path=_UpperCAmelCase ,) # Build dataset for splits _a : List[str] = self.builder.as_dataset( split='train' ,verification_mode=_UpperCAmelCase ,in_memory=self.keep_in_memory ) return dataset class __magic_name__ : def __init__( self : Optional[int] ,_UpperCAmelCase : Dataset ,_UpperCAmelCase : str ,_UpperCAmelCase : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] ,_UpperCAmelCase : Optional[int] = None ,_UpperCAmelCase : Optional[int] = None ,**_UpperCAmelCase : Dict ,): if num_proc is not None and num_proc <= 0: raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""" ) _a : Dict = dataset _a : List[Any] = name _a : Tuple = con _a : Union[str, Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE _a : List[Any] = num_proc _a : Tuple = to_sql_kwargs def __lowercase ( self : List[Any] ): _a : Tuple = self.to_sql_kwargs.pop('sql' ,_UpperCAmelCase ) _a : str = self.to_sql_kwargs.pop('con' ,_UpperCAmelCase ) _a : Optional[Any] = self.to_sql_kwargs.pop('index' ,_UpperCAmelCase ) _a : Any = self._write(index=_UpperCAmelCase ,**self.to_sql_kwargs ) return written def __lowercase ( self : Optional[int] ,_UpperCAmelCase : Dict ): _a , _a , _a : Any = args _a : Tuple = {**to_sql_kwargs, 'if_exists': 'append'} if offset > 0 else to_sql_kwargs _a : Dict = query_table( table=self.dataset.data ,key=slice(_UpperCAmelCase ,offset + self.batch_size ) ,indices=self.dataset._indices ,) _a : Tuple = batch.to_pandas() _a : Dict = df.to_sql(self.name ,self.con ,index=_UpperCAmelCase ,**_UpperCAmelCase ) return num_rows or len(_UpperCAmelCase ) def __lowercase ( self : int ,_UpperCAmelCase : Optional[int] ,**_UpperCAmelCase : List[Any] ): _a : Union[str, Any] = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 ,len(self.dataset ) ,self.batch_size ) ,unit='ba' ,disable=not logging.is_progress_bar_enabled() ,desc='Creating SQL from Arrow format' ,): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: _a , _a : List[Any] = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql ,[(offset, index, to_sql_kwargs) for offset in range(0 ,_UpperCAmelCase ,_UpperCAmelCase )] ,) ,total=(num_rows // 
batch_size) + 1 if num_rows % batch_size else num_rows // batch_size ,unit='ba' ,disable=not logging.is_progress_bar_enabled() ,desc='Creating SQL from Arrow format' ,): written += num_rows return written
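A round-trip sketch with SQLite, going through the public Dataset.to_sql / Dataset.from_sql wrappers around the reader and writer classes above (table name and path are illustrative):

import sqlite3

from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
con = sqlite3.connect("example.db")
ds.to_sql("my_table", con)
con.commit()
reloaded = Dataset.from_sql("SELECT * FROM my_table", "sqlite:///example.db")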
89
1
from typing import Union

import fire
import torch
from tqdm import tqdm


def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Convert a saved pytorch state dict to fp16 and write it back to disk."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)


if __name__ == "__main__":
    fire.Fire(convert)
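The fire.Fire entry point exposes the converter directly on the command line; equivalently from Python:

convert("pytorch_model.bin", save_path="pytorch_model.fp16.bin")
# or from a shell: python fp16_convert.py pytorch_model.bin --save_path pytorch_model.fp16.bin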
352
"""simple docstring""" import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration a_ = pytest.mark.integration a_ = {"""comet"""} a_ = importlib.util.find_spec("""fairseq""") is not None a_ = {"""code_eval"""} a_ = os.name == """nt""" a_ = {"""bertscore""", """frugalscore""", """perplexity"""} a_ = importlib.util.find_spec("""transformers""") is not None def __lowercase ( snake_case_ : str ) ->Any: '''simple docstring''' @wraps(snake_case_ ) def wrapper(self : List[Any] ,snake_case_ : int ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest('''"test requires Fairseq"''' ) else: test_case(self ,snake_case_ ) return wrapper def __lowercase ( snake_case_ : int ) ->str: '''simple docstring''' @wraps(snake_case_ ) def wrapper(self : List[Any] ,snake_case_ : List[str] ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest('''"test requires transformers"''' ) else: test_case(self ,snake_case_ ) return wrapper def __lowercase ( snake_case_ : Union[str, Any] ) ->Tuple: '''simple docstring''' @wraps(snake_case_ ) def wrapper(self : int ,snake_case_ : Dict ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest('''"test not supported on Windows"''' ) else: test_case(self ,snake_case_ ) return wrapper def __lowercase ( ) ->Tuple: '''simple docstring''' __A : int = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('''./metrics/*/''' )] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names() ) @for_all_test_methods( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @local class __snake_case ( parameterized.TestCase ): """simple docstring""" _lowerCamelCase = {} _lowerCamelCase = None @pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' ) @pytest.mark.filterwarnings('''ignore:load_metric is deprecated:FutureWarning''' ) def UpperCamelCase__( self , __lowerCamelCase ): '''simple docstring''' __A : int = '''[...]''' __A : Any = importlib.import_module( datasets.load.metric_module_factory(os.path.join('''metrics''' , __lowerCamelCase ) ).module_path ) __A : str = datasets.load.import_main_class(metric_module.__name__ , dataset=__lowerCamelCase ) # check parameters __A : Optional[int] = inspect.signature(metric._compute ).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs # run doctest with self.patch_intensive_calls(__lowerCamelCase , metric_module.__name__ ): with self.use_local_metrics(): try: __A : Tuple = doctest.testmod(__lowerCamelCase , verbose=__lowerCamelCase , raise_on_error=__lowerCamelCase ) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @slow def UpperCamelCase__( self , __lowerCamelCase ): '''simple docstring''' __A : Any = '''[...]''' __A : Union[str, Any] = importlib.import_module( datasets.load.metric_module_factory(os.path.join('''metrics''' , __lowerCamelCase ) ).module_path ) # run doctest with self.use_local_metrics(): __A : Union[str, Any] = 
doctest.testmod(__lowerCamelCase , verbose=__lowerCamelCase , raise_on_error=__lowerCamelCase ) self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @contextmanager def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase ): '''simple docstring''' if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](__lowerCamelCase ): yield else: yield @contextmanager def UpperCamelCase__( self ): '''simple docstring''' def load_local_metric(__lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase ): return load_metric(os.path.join('''metrics''' , __lowerCamelCase ) , *__lowerCamelCase , **__lowerCamelCase ) with patch('''datasets.load_metric''' ) as mock_load_metric: __A : List[Any] = load_local_metric yield @classmethod def UpperCamelCase__( cls , __lowerCamelCase ): '''simple docstring''' def wrapper(__lowerCamelCase ): __A : Any = contextmanager(__lowerCamelCase ) __A : Optional[Any] = patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher('''bleurt''' ) def __lowercase ( snake_case_ : Tuple ) ->int: '''simple docstring''' import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string('''sv''' ,'''''' ,'''''' ) # handle pytest cli flags class __snake_case ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def UpperCamelCase__( self , __lowerCamelCase ): '''simple docstring''' assert len(input_dict['''input_ids'''] ) == 2 return np.array([1.0_3, 1.0_4] ) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch('''bleurt.score._create_predictor''' ) as mock_create_predictor: __A : List[str] = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher('''bertscore''' ) def __lowercase ( snake_case_ : List[str] ) ->Dict: '''simple docstring''' import torch def bert_cos_score_idf(snake_case_ : Union[str, Any] ,snake_case_ : List[str] ,*snake_case_ : List[str] ,**snake_case_ : Dict ): return torch.tensor([[1.0, 1.0, 1.0]] * len(snake_case_ ) ) # mock get_model which is supposed to do download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch('''bert_score.scorer.get_model''' ), patch( '''bert_score.scorer.bert_cos_score_idf''' ) as mock_bert_cos_score_idf: __A : str = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher('''comet''' ) def __lowercase ( snake_case_ : Optional[int] ) ->List[Any]: '''simple docstring''' def load_from_checkpoint(snake_case_ : str ): class __snake_case : """simple docstring""" def UpperCamelCase__( self , __lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase ): '''simple docstring''' assert len(__lowerCamelCase ) == 2 __A : str = [0.1_9, 0.9_2] return scores, sum(__lowerCamelCase ) / len(__lowerCamelCase ) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert model with patch('''comet.download_model''' ) as mock_download_model: __A : int = None with patch('''comet.load_from_checkpoint''' ) as mock_load_from_checkpoint: __A : Dict = load_from_checkpoint yield def __lowercase ( ) ->str: '''simple docstring''' __A : Optional[Any] = load_metric(os.path.join('''metrics''' ,'''seqeval''' ) ) __A : Optional[int] = '''ERROR''' __A : str = F"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}""" with pytest.raises(snake_case_ ,match=re.escape(snake_case_ ) ): metric.compute(predictions=[] 
,references=[] ,scheme=snake_case_ )
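The patcher registration above reduces to a simple pattern: decorate a generator that patches the expensive call and yields. A sketch for a hypothetical metric, assuming the harness class is named LocalMetricTest as in the datasets test suite:

from unittest.mock import patch

@LocalMetricTest.register_intensive_calls_patcher("my_metric")  # hypothetical metric name
def patch_my_metric(module_name):
    with patch("my_metric.expensive_model_call", return_value=[1.0]):
        yield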
291
0
'''simple docstring''' from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING a__ : Any = logging.get_logger(__name__) a__ : int = Dict[str, Any] a__ : Dict = List[Prediction] @add_end_docstrings(SCREAMING_SNAKE_CASE) class UpperCamelCase__ ( SCREAMING_SNAKE_CASE): def __init__( self :List[Any] , *_A :List[Any] , **_A :int ) -> Any: '''simple docstring''' super().__init__(*_A , **_A ) if self.framework == "tf": raise ValueError(F'The {self.__class__} is only available in PyTorch.' ) requires_backends(self , 'vision' ) self.check_model_type( dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) ) def lowercase_ ( self :str , **_A :Tuple ) -> Tuple: '''simple docstring''' __A = {} if "threshold" in kwargs: __A = kwargs['threshold'] return {}, {}, postprocess_kwargs def __call__( self :List[Any] , *_A :List[str] , **_A :Optional[int] ) -> Union[Predictions, List[Prediction]]: '''simple docstring''' return super().__call__(*_A , **_A ) def lowercase_ ( self :Tuple , _A :Optional[int] ) -> Any: '''simple docstring''' __A = load_image(_A ) __A = torch.IntTensor([[image.height, image.width]] ) __A = self.image_processor(images=[image] , return_tensors='pt' ) if self.tokenizer is not None: __A = self.tokenizer(text=inputs['words'] , boxes=inputs['boxes'] , return_tensors='pt' ) __A = target_size return inputs def lowercase_ ( self :Optional[int] , _A :Optional[Any] ) -> Dict: '''simple docstring''' __A = model_inputs.pop('target_size' ) __A = self.model(**_A ) __A = outputs.__class__({'target_size': target_size, **outputs} ) if self.tokenizer is not None: __A = model_inputs['bbox'] return model_outputs def lowercase_ ( self :Tuple , _A :Union[str, Any] , _A :List[str]=0.9 ) -> Optional[Any]: '''simple docstring''' __A = model_outputs['target_size'] if self.tokenizer is not None: # This is a LayoutLMForTokenClassification variant. # The OCR got the boxes and the model classified the words. __A , __A = target_size[0].tolist() def unnormalize(_A :Union[str, Any] ): return self._get_bounding_box( torch.Tensor( [ (width * bbox[0] / 1_000), (height * bbox[1] / 1_000), (width * bbox[2] / 1_000), (height * bbox[3] / 1_000), ] ) ) __A , __A = model_outputs['logits'].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 ) __A = [self.model.config.idalabel[prediction] for prediction in classes.tolist()] __A = [unnormalize(_A ) for bbox in model_outputs['bbox'].squeeze(0 )] __A = ['score', 'label', 'box'] __A = [dict(zip(_A , _A ) ) for vals in zip(scores.tolist() , _A , _A ) if vals[0] > threshold] else: # This is a regular ForObjectDetectionModel __A = self.image_processor.post_process_object_detection(_A , _A , _A ) __A = raw_annotations[0] __A = raw_annotation['scores'] __A = raw_annotation['labels'] __A = raw_annotation['boxes'] __A = scores.tolist() __A = [self.model.config.idalabel[label.item()] for label in labels] __A = [self._get_bounding_box(_A ) for box in boxes] # {"scores": [...], ...} --> [{"score":x, ...}, ...] 
__A = ['score', 'label', 'box'] __A = [ dict(zip(_A , _A ) ) for vals in zip(raw_annotation['scores'] , raw_annotation['labels'] , raw_annotation['boxes'] ) ] return annotation def lowercase_ ( self :str , _A :"torch.Tensor" ) -> Dict[str, int]: '''simple docstring''' if self.framework != "pt": raise ValueError('The ObjectDetectionPipeline is only available in PyTorch.' ) __A , __A , __A , __A = box.int().tolist() __A = { 'xmin': xmin, 'ymin': ymin, 'xmax': xmax, 'ymax': ymax, } return bbox
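End to end, the pipeline above is reached through the standard factory; a usage sketch (DETR checkpoint chosen for illustration):

from transformers import pipeline

detector = pipeline("object-detection", model="facebook/detr-resnet-50")
predictions = detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
# [{'score': 0.99..., 'label': 'cat', 'box': {'xmin': ..., 'ymin': ..., 'xmax': ..., 'ymax': ...}}, ...]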
161
'''simple docstring''' # DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin, SchedulerOutput @dataclass class UpperCamelCase__ ( SCREAMING_SNAKE_CASE): UpperCAmelCase__ : torch.FloatTensor UpperCAmelCase__ : torch.FloatTensor class UpperCamelCase__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE): UpperCAmelCase__ : int = 1 @register_to_config def __init__( self :Optional[Any] , _A :int = 2_000 , _A :float = 0.15 , _A :float = 0.01 , _A :float = 1_348.0 , _A :float = 1E-5 , _A :int = 1 , ) -> Optional[Any]: '''simple docstring''' __A = sigma_max # setable values __A = None self.set_sigmas(_A , _A , _A , _A ) def lowercase_ ( self :Optional[int] , _A :torch.FloatTensor , _A :Optional[int] = None ) -> torch.FloatTensor: '''simple docstring''' return sample def lowercase_ ( self :Tuple , _A :int , _A :float = None , _A :Union[str, torch.device] = None ) -> Optional[int]: '''simple docstring''' __A = sampling_eps if sampling_eps is not None else self.config.sampling_eps __A = torch.linspace(1 , _A , _A , device=_A ) def lowercase_ ( self :Any , _A :int , _A :float = None , _A :float = None , _A :float = None ) -> Optional[int]: '''simple docstring''' __A = sigma_min if sigma_min is not None else self.config.sigma_min __A = sigma_max if sigma_max is not None else self.config.sigma_max __A = sampling_eps if sampling_eps is not None else self.config.sampling_eps if self.timesteps is None: self.set_timesteps(_A , _A ) __A = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps) __A = torch.exp(torch.linspace(math.log(_A ) , math.log(_A ) , _A ) ) __A = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] ) def lowercase_ ( self :List[Any] , _A :Any , _A :Optional[Any] ) -> str: '''simple docstring''' return torch.where( timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , ) def lowercase_ ( self :List[str] , _A :torch.FloatTensor , _A :int , _A :torch.FloatTensor , _A :Optional[torch.Generator] = None , _A :bool = True , ) -> Union[SdeVeOutput, Tuple]: '''simple docstring''' if self.timesteps is None: raise ValueError( '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' ) __A = timestep * torch.ones( sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0]) __A = (timestep * (len(self.timesteps ) - 1)).long() # mps requires indices to be in the same device, so we use cpu as is the default with cuda __A = timesteps.to(self.discrete_sigmas.device ) __A = self.discrete_sigmas[timesteps].to(sample.device ) __A = self.get_adjacent_sigma(_A , _A ).to(sample.device ) __A = torch.zeros_like(_A ) __A = (sigma**2 - adjacent_sigma**2) ** 0.5 # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x) # also equation 47 shows the analog from SDE models to ancestral sampling methods __A = diffusion.flatten() while len(diffusion.shape ) < len(sample.shape ): __A = diffusion.unsqueeze(-1 ) __A = drift - diffusion**2 * model_output # equation 6: sample noise for the diffusion term of __A = randn_tensor( sample.shape , layout=sample.layout , generator=_A , device=sample.device , dtype=sample.dtype ) __A = 
sample - drift # subtract because `dt` is a small negative timestep # TODO is the variable diffusion the correct scaling term for the noise? __A = prev_sample_mean + diffusion * noise # add impact of diffusion field g if not return_dict: return (prev_sample, prev_sample_mean) return SdeVeOutput(prev_sample=_A , prev_sample_mean=_A ) def lowercase_ ( self :str , _A :torch.FloatTensor , _A :torch.FloatTensor , _A :Optional[torch.Generator] = None , _A :bool = True , ) -> Union[SchedulerOutput, Tuple]: '''simple docstring''' if self.timesteps is None: raise ValueError( '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' ) # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z" # sample noise for correction __A = randn_tensor(sample.shape , layout=sample.layout , generator=_A ).to(sample.device ) # compute step size from the model_output, the noise, and the snr __A = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean() __A = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean() __A = (self.config.snr * noise_norm / grad_norm) ** 2 * 2 __A = step_size * torch.ones(sample.shape[0] ).to(sample.device ) # self.repeat_scalar(step_size, sample.shape[0]) # compute corrected sample: model_output term and noise term __A = step_size.flatten() while len(step_size.shape ) < len(sample.shape ): __A = step_size.unsqueeze(-1 ) __A = sample + step_size * model_output __A = prev_sample_mean + ((step_size * 2) ** 0.5) * noise if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=_A ) def lowercase_ ( self :Any , _A :torch.FloatTensor , _A :torch.FloatTensor , _A :torch.FloatTensor , ) -> torch.FloatTensor: '''simple docstring''' __A = timesteps.to(original_samples.device ) __A = self.discrete_sigmas.to(original_samples.device )[timesteps] __A = ( noise * sigmas[:, None, None, None] if noise is not None else torch.randn_like(_A ) * sigmas[:, None, None, None] ) __A = noise + original_samples return noisy_samples def __len__( self :Optional[Any] ) -> Optional[Any]: '''simple docstring''' return self.config.num_train_timesteps
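# Editor's sketch (added; not part of the original file): the predictor update in
# `step_pred` above reduces to one Euler-Maruyama step of the reverse SDE,
#   x_prev = x + (sigma_t^2 - sigma_{t-1}^2) * score + sqrt(sigma_t^2 - sigma_{t-1}^2) * z,
# i.e. the "equation 6" referenced in the comments. A minimal standalone restatement:
from typing import Optional

import torch


def sde_ve_predictor_step(
    sample: torch.Tensor,
    score: torch.Tensor,
    sigma: float,
    adjacent_sigma: float,
    generator: Optional[torch.Generator] = None,
) -> torch.Tensor:
    drift = -(sigma**2 - adjacent_sigma**2) * score
    diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
    noise = torch.randn(sample.shape, generator=generator, dtype=sample.dtype)
    # `dt` is a small negative timestep, hence the drift term is subtracted.
    return sample - drift + diffusion * noise


# One step from sigma=1.0 down to sigma=0.8 on a dummy 4x4 sample:
x = torch.zeros(1, 1, 4, 4)
x = sde_ve_predictor_step(x, score=torch.zeros_like(x), sigma=1.0, adjacent_sigma=0.8)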
161
1
"""simple docstring""" import gzip import hashlib import json import multiprocessing import os import re import shutil import time from pathlib import Path import numpy as np from arguments import PreprocessingArguments from datasets import load_dataset from minhash_deduplication import deduplicate_dataset from transformers import AutoTokenizer, HfArgumentParser lowerCAmelCase_ = re.compile(R'\s+') def snake_case( __magic_name__ ) -> str: '''simple docstring''' return {"hash": hashlib.mda(re.sub(lowerCAmelCase__ , '''''' , example['''content'''] ).encode('''utf-8''' ) ).hexdigest()} def snake_case( __magic_name__ ) -> Optional[Any]: '''simple docstring''' lowercase : Dict = [len(lowerCAmelCase__ ) for line in example['''content'''].splitlines()] return {"line_mean": np.mean(lowerCAmelCase__ ), "line_max": max(lowerCAmelCase__ )} def snake_case( __magic_name__ ) -> Optional[Any]: '''simple docstring''' lowercase : List[str] = np.mean([c.isalnum() for c in example['''content''']] ) return {"alpha_frac": alpha_frac} def snake_case( __magic_name__ , __magic_name__ ) -> Optional[int]: '''simple docstring''' if example["hash"] in uniques: uniques.remove(example['''hash'''] ) return True else: return False def snake_case( __magic_name__ , __magic_name__=5 ) -> Optional[int]: '''simple docstring''' lowercase : Union[str, Any] = ['''auto-generated''', '''autogenerated''', '''automatically generated'''] lowercase : Union[str, Any] = example['''content'''].splitlines() for _, line in zip(range(lowerCAmelCase__ ) , lowerCAmelCase__ ): for keyword in keywords: if keyword in line.lower(): return {"autogenerated": True} else: return {"autogenerated": False} def snake_case( __magic_name__ , __magic_name__=5 , __magic_name__=0.0_5 ) -> str: '''simple docstring''' lowercase : Optional[int] = ['''unit tests''', '''test file''', '''configuration file'''] lowercase : Any = example['''content'''].splitlines() lowercase : List[Any] = 0 lowercase : Union[str, Any] = 0 # first test for _, line in zip(range(lowerCAmelCase__ ) , lowerCAmelCase__ ): for keyword in keywords: if keyword in line.lower(): return {"config_or_test": True} # second test lowercase : List[Any] = example['''content'''].count('''\n''' ) lowercase : int = int(coeff * nlines ) for line in lines: count_config += line.lower().count('''config''' ) count_test += line.lower().count('''test''' ) if count_config > threshold or count_test > threshold: return {"config_or_test": True} return {"config_or_test": False} def snake_case( __magic_name__ ) -> Any: '''simple docstring''' lowercase : Union[str, Any] = ['''def ''', '''class ''', '''for ''', '''while '''] lowercase : Any = example['''content'''].splitlines() for line in lines: for keyword in keywords: if keyword in line.lower(): return {"has_no_keywords": False} return {"has_no_keywords": True} def snake_case( __magic_name__ , __magic_name__=4 ) -> Optional[int]: '''simple docstring''' lowercase : Dict = example['''content'''].splitlines() lowercase : int = 0 for line in lines: counter += line.lower().count('''=''' ) if counter > minimum: return {"has_few_assignments": False} return {"has_few_assignments": True} def snake_case( __magic_name__ ) -> Any: '''simple docstring''' lowercase : int = tokenizer(example['''content'''] , truncation=lowerCAmelCase__ )['''input_ids'''] lowercase : List[str] = len(example['''content'''] ) / len(lowerCAmelCase__ ) return {"ratio": ratio} def snake_case( __magic_name__ ) -> List[str]: '''simple docstring''' lowercase : Dict = {} 
results.update(get_hash(lowerCAmelCase__ ) ) results.update(line_stats(lowerCAmelCase__ ) ) results.update(alpha_stats(lowerCAmelCase__ ) ) results.update(char_token_ratio(lowerCAmelCase__ ) ) results.update(is_autogenerated(lowerCAmelCase__ ) ) results.update(is_config_or_test(lowerCAmelCase__ ) ) results.update(has_no_keywords(lowerCAmelCase__ ) ) results.update(has_few_assignments(lowerCAmelCase__ ) ) return results def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> str: '''simple docstring''' if not check_uniques(lowerCAmelCase__ , lowerCAmelCase__ ): return False elif example["autogenerated"]: return False elif example["line_max"] > args.line_max: return False elif example["line_mean"] > args.line_mean: return False elif example["alpha_frac"] < args.alpha_frac: return False elif example["ratio"] < args.min_token_ratio: return False elif example["config_or_test"] and np.random.rand() <= args.filter_proba: return False elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba: return False elif example["has_few_assignments"]: return False else: return True def snake_case( __magic_name__ ) -> List[Any]: '''simple docstring''' with open(lowerCAmelCase__ , '''rb''' ) as f_in: with gzip.open(str(lowerCAmelCase__ ) + '''.gz''' , '''wb''' , compresslevel=6 ) as f_out: shutil.copyfileobj(lowerCAmelCase__ , lowerCAmelCase__ ) os.unlink(lowerCAmelCase__ ) # Settings lowerCAmelCase_ = HfArgumentParser(PreprocessingArguments) lowerCAmelCase_ = parser.parse_args() if args.num_workers is None: lowerCAmelCase_ = multiprocessing.cpu_count() lowerCAmelCase_ = AutoTokenizer.from_pretrained(args.tokenizer_dir) # Load dataset lowerCAmelCase_ = time.time() lowerCAmelCase_ = load_dataset(args.dataset_name, split='train') print(f'''Time to load dataset: {time.time()-t_start:.2f}''') # Run preprocessing lowerCAmelCase_ = time.time() lowerCAmelCase_ = ds.map(preprocess, num_proc=args.num_workers) print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''') # Deduplicate hashes lowerCAmelCase_ = set(ds.unique('hash')) lowerCAmelCase_ = len(uniques) / len(ds) print(f'''Fraction of duplicates: {1-frac:.2%}''') # Deduplicate data and apply heuristics lowerCAmelCase_ = time.time() lowerCAmelCase_ = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args}) print(f'''Time to filter dataset: {time.time()-t_start:.2f}''') print(f'''Size of filtered dataset: {len(ds_filter)}''') # Deduplicate with minhash and jaccard similarity if args.near_deduplication: lowerCAmelCase_ = time.time() lowerCAmelCase_ = deduplicate_dataset(ds_filter, args.jaccard_threshold) print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''') print(f'''Size of deduplicate dataset: {len(ds_filter)}''') # Save data in batches of samples_per_file lowerCAmelCase_ = Path(args.output_dir) output_dir.mkdir(exist_ok=True) # save duplicate_clusters in the output_dir as artifacts # not sure it is the right place the save it if args.near_deduplication: with open(output_dir / 'duplicate_clusters.json', 'w') as f: json.dump(duplicate_clusters, f) lowerCAmelCase_ = output_dir / """data""" data_dir.mkdir(exist_ok=True) lowerCAmelCase_ = time.time() for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)): lowerCAmelCase_ = str(data_dir / f'''file-{file_number+1:012}.json''') lowerCAmelCase_ = min(len(ds_filter), index + args.samples_per_file) ds_filter.select(list(range(index, end_index))).to_json(file_path) compress_file(file_path) print(f'''Time to save dataset: 
{time.time()-t_start:.2f}''')
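# Editor's sketch (added; values are illustrative, not PreprocessingArguments
# defaults): the per-example statistics the filters above rely on, restated as a
# tiny standalone function so the heuristics can be eyeballed on a string.
import numpy as np


def basic_stats(content: str) -> dict:
    line_lengths = [len(line) for line in content.splitlines()]
    return {
        "line_mean": float(np.mean(line_lengths)),
        "line_max": max(line_lengths),
        "alpha_frac": float(np.mean([c.isalnum() for c in content])),
    }


print(basic_stats("def add(a, b):\n    return a + b\n"))
# {'line_mean': 15.0, 'line_max': 16, 'alpha_frac': 0.5}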
360
from sklearn.metrics import mean_squared_error import datasets lowerCAmelCase_ = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n' lowerCAmelCase_ = '\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n' lowerCAmelCase_ = '\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n "raw_values" : Returns a full set of errors in case of multioutput input.\n\n "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric("mse")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {\'mse\': 0.6123724356957945}\n\n If you\'re using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric("mse", "multilist")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mse\': array([0.41666667, 1. 
])}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _A ( datasets.Metric ): def __a ( self : List[Any] ) -> Optional[Any]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[ '''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html''' ] , ) def __a ( self : List[Any] ) -> int: """simple docstring""" if self.config_name == "multilist": return { "predictions": datasets.Sequence(datasets.Value('''float''' ) ), "references": datasets.Sequence(datasets.Value('''float''' ) ), } else: return { "predictions": datasets.Value('''float''' ), "references": datasets.Value('''float''' ), } def __a ( self : Any , _A : Dict , _A : Any , _A : Any=None , _A : Any="uniform_average" , _A : Optional[Any]=True ) -> Dict: """simple docstring""" lowercase : Any = mean_squared_error( _A , _A , sample_weight=_A , multioutput=_A , squared=_A ) return {"mse": mse}
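# Editor's sketch (added): the metric above is a thin wrapper around
# scikit-learn, so the docstring numbers can be reproduced directly;
# squared=False (as in the docstring) returns the RMSE instead of the MSE.
from sklearn.metrics import mean_squared_error

predictions = [2.5, 0.0, 2, 8]
references = [3, -0.5, 2, 7]
print(mean_squared_error(references, predictions))  # 0.375
print(mean_squared_error(references, predictions, squared=False))  # 0.6123724356957945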
116
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase = {'configuration_reformer': ['REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ReformerConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = ['ReformerTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = ['ReformerTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = [ 'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'ReformerAttention', 'ReformerForMaskedLM', 'ReformerForQuestionAnswering', 'ReformerForSequenceClassification', 'ReformerLayer', 'ReformerModel', 'ReformerModelWithLMHead', 'ReformerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
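# Editor's sketch (added; module and attribute names here are hypothetical): the
# _LazyModule indirection above defers heavy imports until an attribute is first
# accessed. The same idea in plain Python uses PEP 562 module-level __getattr__:
import importlib

_import_structure = {"tokenization_reformer": ["ReformerTokenizer"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}


def __getattr__(name: str):
    if name in _attr_to_module:
        # Import the submodule lazily and forward the attribute lookup to it.
        module = importlib.import_module("." + _attr_to_module[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")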
110
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase : Any = { "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"], "convert_funnel_original_tf_checkpoint_to_pytorch": [], "tokenization_funnel": ["FunnelTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Optional[Any] = ["FunnelTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : List[str] = [ "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST", "FunnelBaseModel", "FunnelForMaskedLM", "FunnelForMultipleChoice", "FunnelForPreTraining", "FunnelForQuestionAnswering", "FunnelForSequenceClassification", "FunnelForTokenClassification", "FunnelModel", "FunnelPreTrainedModel", "load_tf_weights_in_funnel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Union[str, Any] = [ "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST", "TFFunnelBaseModel", "TFFunnelForMaskedLM", "TFFunnelForMultipleChoice", "TFFunnelForPreTraining", "TFFunnelForQuestionAnswering", "TFFunnelForSequenceClassification", "TFFunnelForTokenClassification", "TFFunnelModel", "TFFunnelPreTrainedModel", ] if TYPE_CHECKING: from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig from .tokenization_funnel import FunnelTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_funnel_fast import FunnelTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_funnel import ( FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, FunnelBaseModel, FunnelForMaskedLM, FunnelForMultipleChoice, FunnelForPreTraining, FunnelForQuestionAnswering, FunnelForSequenceClassification, FunnelForTokenClassification, FunnelModel, FunnelPreTrainedModel, load_tf_weights_in_funnel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_funnel import ( TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, TFFunnelPreTrainedModel, ) else: import sys lowerCamelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
47
0
from __future__ import annotations

from collections.abc import Sequence


def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Return the maximum sum over all (not necessarily contiguous) subsequences."""
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")
    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        # The best subsequence either stays as is, extends with num, or restarts at num.
        ans = max(ans, ans + num, num)
    return ans


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
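# Editor's usage sketch (added) for the function above: subsequences need not be
# contiguous, so a mixed-sign input sums its best picks, while an all-negative
# input yields its single largest element.
assert max_subsequence_sum([1, 2, 3, 4, -2]) == 10
assert max_subsequence_sum([-2, -3, -1, -4, -6]) == -1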
366
'''simple docstring''' import argparse from typing import List import evaluate import numpy as np import torch from datasets import DatasetDict, load_dataset # New Code # # We'll be using StratifiedKFold for this example from sklearn.model_selection import StratifiedKFold from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to perform Cross Validation, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## A_ : Any = 1_6 A_ : Union[str, Any] = 3_2 def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 16 )-> Optional[int]: '''simple docstring''' _UpperCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained("""bert-base-cased""" ) _UpperCAmelCase : str = DatasetDict( { """train""": dataset["""train"""].select(lowerCAmelCase_ ), """validation""": dataset["""train"""].select(lowerCAmelCase_ ), """test""": dataset["""validation"""], } ) def tokenize_function(lowerCAmelCase_ ): # max_length=None => use the model max length (it's actually the default) _UpperCAmelCase : List[str] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _UpperCAmelCase : Optional[int] = datasets.map( lowerCAmelCase_ , batched=lowerCAmelCase_ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _UpperCAmelCase : List[Any] = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(lowerCAmelCase_ ): # On TPU it's best to pad everything to the same length or training will be very slow. _UpperCAmelCase : Tuple = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _UpperCAmelCase : List[str] = 16 elif accelerator.mixed_precision != "no": _UpperCAmelCase : Any = 8 else: _UpperCAmelCase : Dict = None return tokenizer.pad( lowerCAmelCase_ , padding="""longest""" , max_length=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_tensors="""pt""" , ) # Instantiate dataloaders. 
_UpperCAmelCase : Union[str, Any] = DataLoader( tokenized_datasets["""train"""] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ ) _UpperCAmelCase : Union[str, Any] = DataLoader( tokenized_datasets["""validation"""] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ ) _UpperCAmelCase : Dict = DataLoader( tokenized_datasets["""test"""] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ ) return train_dataloader, eval_dataloader, test_dataloader def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> Optional[int]: '''simple docstring''' _UpperCAmelCase : Optional[int] = [] # Download the dataset _UpperCAmelCase : Dict = load_dataset("""glue""" , """mrpc""" ) # Create our splits _UpperCAmelCase : Optional[Any] = StratifiedKFold(n_splits=int(args.num_folds ) ) # Initialize accelerator _UpperCAmelCase : Union[str, Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _UpperCAmelCase : Dict = config["""lr"""] _UpperCAmelCase : List[Any] = int(config["""num_epochs"""] ) _UpperCAmelCase : str = int(config["""seed"""] ) _UpperCAmelCase : List[Any] = int(config["""batch_size"""] ) _UpperCAmelCase : int = evaluate.load("""glue""" , """mrpc""" ) # If the batch size is too big we use gradient accumulation _UpperCAmelCase : List[Any] = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: _UpperCAmelCase : Dict = batch_size // MAX_GPU_BATCH_SIZE _UpperCAmelCase : Tuple = MAX_GPU_BATCH_SIZE set_seed(lowerCAmelCase_ ) # New Code # # Create our folds: _UpperCAmelCase : Any = kfold.split(np.zeros(datasets["""train"""].num_rows ) , datasets["""train"""]["""label"""] ) _UpperCAmelCase : Tuple = [] # Iterate over them for i, (train_idxs, valid_idxs) in enumerate(lowerCAmelCase_ ): _UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Union[str, Any] = get_fold_dataloaders( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _UpperCAmelCase : Tuple = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowerCAmelCase_ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _UpperCAmelCase : List[Any] = model.to(accelerator.device ) # Instantiate optimizer _UpperCAmelCase : int = AdamW(params=model.parameters() , lr=lowerCAmelCase_ ) # Instantiate scheduler _UpperCAmelCase : Dict = get_linear_schedule_with_warmup( optimizer=lowerCAmelCase_ , num_warmup_steps=100 , num_training_steps=(len(lowerCAmelCase_ ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Any = accelerator.prepare( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # Now we train the model for epoch in range(lowerCAmelCase_ ): model.train() for step, batch in enumerate(lowerCAmelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) _UpperCAmelCase : Union[str, Any] = model(**lowerCAmelCase_ ) _UpperCAmelCase : Dict = outputs.loss _UpperCAmelCase : int = loss / gradient_accumulation_steps accelerator.backward(lowerCAmelCase_ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowerCAmelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _UpperCAmelCase : List[str] = model(**lowerCAmelCase_ ) _UpperCAmelCase : List[Any] = outputs.logits.argmax(dim=-1 ) _UpperCAmelCase ,_UpperCAmelCase : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=lowerCAmelCase_ , references=lowerCAmelCase_ , ) _UpperCAmelCase : List[Any] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''' , lowerCAmelCase_ ) # New Code # # We also run predictions on the test set at the very end _UpperCAmelCase : Tuple = [] for step, batch in enumerate(lowerCAmelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _UpperCAmelCase : List[Any] = model(**lowerCAmelCase_ ) _UpperCAmelCase : Any = outputs.logits _UpperCAmelCase ,_UpperCAmelCase : List[Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) fold_predictions.append(predictions.cpu() ) if i == 0: # We need all of the test predictions test_references.append(references.cpu() ) # Use accelerator.print to print only on the main process. test_predictions.append(torch.cat(lowerCAmelCase_ , dim=0 ) ) # We now need to release all our memory and get rid of the current model, optimizer, etc accelerator.free_memory() # New Code # # Finally we check the accuracy of our folded results: _UpperCAmelCase : List[Any] = torch.cat(lowerCAmelCase_ , dim=0 ) _UpperCAmelCase : Union[str, Any] = torch.stack(lowerCAmelCase_ , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 ) _UpperCAmelCase : List[str] = metric.compute(predictions=lowerCAmelCase_ , references=lowerCAmelCase_ ) accelerator.print("""Average test metrics from all folds:""" , lowerCAmelCase_ ) def snake_case_ ( )-> Any: '''simple docstring''' _UpperCAmelCase : List[str] = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=lowerCAmelCase_ , default=lowerCAmelCase_ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). 
Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) # New Code # parser.add_argument("""--num_folds""" , type=lowerCAmelCase_ , default=3 , help="""The number of splits to perform across the dataset""" ) _UpperCAmelCase : Optional[int] = parser.parse_args() _UpperCAmelCase : Tuple = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(lowerCAmelCase_ , lowerCAmelCase_ ) if __name__ == "__main__": main()
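# Editor's sketch (added; toy labels): the fold construction used above,
# isolated. StratifiedKFold yields (train_idxs, valid_idxs) index arrays whose
# class proportions match the full label array.
import numpy as np
from sklearn.model_selection import StratifiedKFold

labels = np.array([0, 0, 0, 1, 1, 1, 0, 1])
for train_idxs, valid_idxs in StratifiedKFold(n_splits=2).split(np.zeros(len(labels)), labels):
    print(train_idxs, valid_idxs)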
349
0
from __future__ import annotations


def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of the merged, sorted contents of the two arrays."""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
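# Editor's usage sketch (added) for the function above.
assert median_of_two_arrays([1.0, 3.0], [2.0]) == 2.0
assert median_of_two_arrays([1.0, 2.0], [3.0, 4.0]) == 2.5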
26
import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class lowercase ( UpperCamelCase__ ): def __init__( self , _a , _a , _a = None , _a = None , _a = False , **_a , ) -> int: super().__init__(features=_a , cache_dir=_a , keep_in_memory=_a , **_a ) _A : Optional[int] = Sql( cache_dir=_a , features=_a , sql=_a , con=_a , **_a , ) def a__ ( self ) -> Optional[Any]: _A : Tuple = None _A : int = None _A : Tuple = None _A : Union[str, Any] = None self.builder.download_and_prepare( download_config=_a , download_mode=_a , verification_mode=_a , base_path=_a , ) # Build dataset for splits _A : int = self.builder.as_dataset( split="""train""" , verification_mode=_a , in_memory=self.keep_in_memory ) return dataset class lowercase : def __init__( self , _a , _a , _a , _a = None , _a = None , **_a , ) -> Union[str, Any]: if num_proc is not None and num_proc <= 0: raise ValueError(F'''num_proc {num_proc} must be an integer > 0.''' ) _A : Dict = dataset _A : int = name _A : Union[str, Any] = con _A : str = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE _A : str = num_proc _A : Optional[Any] = to_sql_kwargs def a__ ( self ) -> int: _A : Any = self.to_sql_kwargs.pop("""sql""" , _a ) _A : List[str] = self.to_sql_kwargs.pop("""con""" , _a ) _A : int = self.to_sql_kwargs.pop("""index""" , _a ) _A : List[str] = self._write(index=_a , **self.to_sql_kwargs ) return written def a__ ( self , _a ) -> Optional[int]: _A , _A , _A : List[str] = args _A : int = {**to_sql_kwargs, """if_exists""": """append"""} if offset > 0 else to_sql_kwargs _A : str = query_table( table=self.dataset.data , key=slice(_a , offset + self.batch_size ) , indices=self.dataset._indices , ) _A : Tuple = batch.to_pandas() _A : Union[str, Any] = df.to_sql(self.name , self.con , index=_a , **_a ) return num_rows or len(_a ) def a__ ( self , _a , **_a ) -> int: _A : Any = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: _A , _A : Tuple = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , _a , _a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ): written += num_rows return written
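# Editor's sketch (added): the writer above ultimately calls
# pandas.DataFrame.to_sql batch by batch; the round trip is visible with an
# in-memory SQLite connection.
import sqlite3

import pandas as pd

con = sqlite3.connect(":memory:")
pd.DataFrame({"text": ["a", "b"], "label": [0, 1]}).to_sql("train", con, index=False)
print(pd.read_sql("SELECT * FROM train", con))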
26
1
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}

if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
218
from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
218
1
__author__ = "Tobias Carryer"

from time import time


class LinearCongruentialGenerator:
    """A pseudorandom number generator of the form x_{n+1} = (a * x_n + c) mod m."""

    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        """Advance the generator and return the next pseudorandom number."""
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed


if __name__ == "__main__":
    # Show the LCG in action.
    lcg = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31)
    while True:
        print(lcg.next_number())
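# Editor's usage sketch (added): with a fixed seed the sequence is fully
# deterministic, which is the point of a congruential generator.
lcg_a = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31, seed=42)
lcg_b = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31, seed=42)
assert [lcg_a.next_number() for _ in range(5)] == [lcg_b.next_number() for _ in range(5)]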
53
import os import random import sys from . import cryptomath_module as cryptomath from . import rabin_miller UpperCAmelCase__ : Any = 3 def lowerCamelCase__ ( a ) -> int: print('''Generating primitive root of p''' ) while True: _A: Union[str, Any] = random.randrange(3 , a ) if pow(a , 2 , a ) == 1: continue if pow(a , a , a ) == 1: continue return g def lowerCamelCase__ ( a ) -> tuple[tuple[int, int, int, int], tuple[int, int]]: print('''Generating prime p...''' ) _A: Dict = rabin_miller.generate_large_prime(a ) # select large prime number. _A: Any = primitive_root(a ) # one primitive root on modulo p. _A: Optional[Any] = random.randrange(3 , a ) # private_key -> have to be greater than 2 for safety. _A: Dict = cryptomath.find_mod_inverse(pow(a , a , a ) , a ) _A: Union[str, Any] = (key_size, e_a, e_a, p) _A: Union[str, Any] = (key_size, d) return public_key, private_key def lowerCamelCase__ ( a , a ) -> None: if os.path.exists(f"""{name}_pubkey.txt""" ) or os.path.exists(f"""{name}_privkey.txt""" ): print('''\nWARNING:''' ) print( f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n""" '''Use a different name or delete these files and re-run this program.''' ) sys.exit() _A , _A: Any = generate_key(a ) print(f"""\nWriting public key to file {name}_pubkey.txt...""" ) with open(f"""{name}_pubkey.txt""" , '''w''' ) as fo: fo.write(f"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""" ) print(f"""Writing private key to file {name}_privkey.txt...""" ) with open(f"""{name}_privkey.txt""" , '''w''' ) as fo: fo.write(f"""{private_key[0]},{private_key[1]}""" ) def lowerCamelCase__ ( ) -> None: print('''Making key files...''' ) make_key_files('''elgamal''' , 20_48 ) print('''Key files generation successful''' ) if __name__ == "__main__": main()
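# Editor's sketch (added; illustrative values, far smaller than the key sizes
# used above): the modular arithmetic behind the ElGamal key pair. Since Python
# 3.8, pow(h, -1, p) computes the same modular inverse as
# cryptomath.find_mod_inverse.
p = 2_147_483_647  # a known Mersenne prime, standing in for rabin_miller's output
g, d = 3, 1_234_567  # public base and private exponent (hypothetical)
h = pow(g, d, p)  # public key component: g^d mod p
assert (h * pow(h, -1, p)) % p == 1  # the inverse really inverts h modulo p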
121
0
import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class UpperCAmelCase ( lowercase__ ): def __init__( self : Union[str, Any] , __snake_case : Union[str, "sqlalchemy.sql.Selectable"] , __snake_case : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , __snake_case : Optional[Features] = None , __snake_case : str = None , __snake_case : bool = False , **__snake_case : List[Any] , ) -> int: super().__init__(features=_a , cache_dir=_a , keep_in_memory=_a , **_a ) _lowerCAmelCase = Sql( cache_dir=_a , features=_a , sql=_a , con=_a , **_a , ) def lowercase__ ( self : Union[str, Any] ) -> str: _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = None self.builder.download_and_prepare( download_config=_a , download_mode=_a , verification_mode=_a , base_path=_a , ) # Build dataset for splits _lowerCAmelCase = self.builder.as_dataset( split="""train""" , verification_mode=_a , in_memory=self.keep_in_memory ) return dataset class UpperCAmelCase : def __init__( self : Optional[Any] , __snake_case : Dataset , __snake_case : str , __snake_case : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , __snake_case : Optional[int] = None , __snake_case : Optional[int] = None , **__snake_case : int , ) -> int: if num_proc is not None and num_proc <= 0: raise ValueError(f"num_proc {num_proc} must be an integer > 0." ) _lowerCAmelCase = dataset _lowerCAmelCase = name _lowerCAmelCase = con _lowerCAmelCase = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE _lowerCAmelCase = num_proc _lowerCAmelCase = to_sql_kwargs def lowercase__ ( self : List[str] ) -> str: _lowerCAmelCase = self.to_sql_kwargs.pop("""sql""" , _a ) _lowerCAmelCase = self.to_sql_kwargs.pop("""con""" , _a ) _lowerCAmelCase = self.to_sql_kwargs.pop("""index""" , _a ) _lowerCAmelCase = self._write(index=_a , **self.to_sql_kwargs ) return written def lowercase__ ( self : Any , __snake_case : Tuple ) -> Any: _lowerCAmelCase = args _lowerCAmelCase = {**to_sql_kwargs, 'if_exists': 'append'} if offset > 0 else to_sql_kwargs _lowerCAmelCase = query_table( table=self.dataset.data , key=slice(_a , offset + self.batch_size ) , indices=self.dataset._indices , ) _lowerCAmelCase = batch.to_pandas() _lowerCAmelCase = df.to_sql(self.name , self.con , index=_a , **_a ) return num_rows or len(_a ) def lowercase__ ( self : Optional[int] , __snake_case : List[str] , **__snake_case : List[Any] ) -> List[Any]: _lowerCAmelCase = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: _lowerCAmelCase = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , _a , _a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ): written += num_rows 
return written
355
'''simple docstring''' # Usage: # ./gen-card-facebook-wmt19.py import os from pathlib import Path def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = { """en""": """Machine learning is great, isn't it?""", """ru""": """Машинное обучение - это здорово, не так ли?""", """de""": """Maschinelles Lernen ist großartig, oder?""", } # BLUE scores as follows: # "pair": [fairseq, transformers] _lowerCAmelCase = { """ru-en""": ["""[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)""", """39.20"""], """en-ru""": ["""[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)""", """33.47"""], """en-de""": ["""[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)""", """42.83"""], """de-en""": ["""[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)""", """41.35"""], } _lowerCAmelCase = f"{src_lang}-{tgt_lang}" _lowerCAmelCase = f"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. 
For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n" os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase ) _lowerCAmelCase = os.path.join(lowerCAmelCase , """README.md""" ) print(f"Generating {path}" ) with open(lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f: f.write(lowerCAmelCase ) # make sure we are under the root of the project A__ : Optional[int] =Path(__file__).resolve().parent.parent.parent A__ : Union[str, Any] =repo_dir / '''model_cards''' for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: A__ , A__ , A__ : Optional[Any] =model_name.split('''-''') A__ : List[str] =model_cards_dir / '''facebook''' / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
220
0
"""simple docstring""" class UpperCamelCase__ : """simple docstring""" def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ): # we need a list not a string, so do something to change the type lowerCAmelCase_ : List[Any] = arr.split(',' ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): lowerCAmelCase_ : Union[str, Any] = [int(self.array[0] )] * len(self.array ) lowerCAmelCase_ : List[Any] = [int(self.array[0] )] * len(self.array ) for i in range(1 , len(self.array ) ): lowerCAmelCase_ : Union[str, Any] = max( int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) ) lowerCAmelCase_ : str = max(sum_value[i] , rear[i - 1] ) return rear[len(self.array ) - 1] if __name__ == "__main__": lowercase__ : List[Any] = input("""please input some numbers:""") lowercase__ : Union[str, Any] = SubArray(whole_array) lowercase__ : str = array.solve_sub_array() print(("""the results is:""", re))
224
"""simple docstring""" from typing import Union import fire import torch from tqdm import tqdm def UpperCamelCase_ ( lowerCAmelCase__ : str , lowerCAmelCase__ : str = "cpu" , lowerCAmelCase__ : Union[str, None] = None ) -> None: """simple docstring""" lowerCAmelCase_ : Any = torch.load(lowerCAmelCase__ , map_location=lowerCAmelCase__ ) for k, v in tqdm(state_dict.items() ): if not isinstance(lowerCAmelCase__ , torch.Tensor ): raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin' ) lowerCAmelCase_ : str = v.half() if save_path is None: # overwrite src_path lowerCAmelCase_ : Dict = src_path torch.save(lowerCAmelCase__ , lowerCAmelCase__ ) if __name__ == "__main__": fire.Fire(convert)
224
1
def and_gate(input_1: int, input_2: int) -> int:
    """Return 1 only when both inputs are 1, mirroring a logical AND gate."""
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1


if __name__ == "__main__":
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
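# Editor's sketch (added): other gates compose from the AND gate above; a NAND
# gate is simply its negation.
def nand_gate(input_1: int, input_2: int) -> int:
    return 1 - and_gate(input_1, input_2)


assert [nand_gate(a, b) for a, b in ((0, 0), (0, 1), (1, 0), (1, 1))] == [1, 1, 1, 0]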
369
import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = {"vocab_file": "vocab.txt"} UpperCamelCase__ = { "vocab_file": { "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt", }, } UpperCamelCase__ = { "openbmb/cpm-ant-10b": 1_024, } def _UpperCamelCase (a__ :Tuple ): """simple docstring""" UpperCamelCase__ = collections.OrderedDict() with open(a__ , """r""" , encoding="""utf-8""" ) as reader: UpperCamelCase__ = reader.readlines() for index, token in enumerate(a__ ): UpperCamelCase__ = token.rstrip("""\n""" ) UpperCamelCase__ = index return vocab class __SCREAMING_SNAKE_CASE ( _a ): def __init__( self , __lowerCAmelCase , __lowerCAmelCase="<unk>" , __lowerCAmelCase=200 ): UpperCamelCase__ = vocab UpperCamelCase__ = unk_token UpperCamelCase__ = max_input_chars_per_word def _lowerCamelCase ( self , __lowerCAmelCase ): UpperCamelCase__ = list(__lowerCAmelCase ) if len(__lowerCAmelCase ) > self.max_input_chars_per_word: return [self.unk_token] UpperCamelCase__ = 0 UpperCamelCase__ = [] while start < len(__lowerCAmelCase ): UpperCamelCase__ = len(__lowerCAmelCase ) UpperCamelCase__ = None while start < end: UpperCamelCase__ = """""".join(chars[start:end] ) if substr in self.vocab: UpperCamelCase__ = substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token ) start += 1 else: sub_tokens.append(__lowerCAmelCase ) UpperCamelCase__ = end return sub_tokens class __SCREAMING_SNAKE_CASE ( _a ): snake_case : Optional[int] = VOCAB_FILES_NAMES snake_case : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP snake_case : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case : Optional[int] = ["""input_ids""", """attention_mask"""] snake_case : int = False def __init__( self , __lowerCAmelCase , __lowerCAmelCase="<d>" , __lowerCAmelCase="</d>" , __lowerCAmelCase="<s>" , __lowerCAmelCase="</s>" , __lowerCAmelCase="<pad>" , __lowerCAmelCase="<unk>" , __lowerCAmelCase="</n>" , __lowerCAmelCase="</_>" , __lowerCAmelCase="left" , **__lowerCAmelCase , ): requires_backends(self , ["""jieba"""] ) super().__init__( bod_token=__lowerCAmelCase , eod_token=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , line_token=__lowerCAmelCase , space_token=__lowerCAmelCase , padding_side=__lowerCAmelCase , **__lowerCAmelCase , ) UpperCamelCase__ = bod_token UpperCamelCase__ = eod_token UpperCamelCase__ = load_vocab(__lowerCAmelCase ) UpperCamelCase__ = self.encoder[space_token] UpperCamelCase__ = self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] UpperCamelCase__ = collections.OrderedDict(sorted(self.encoder.items() , key=lambda __lowerCAmelCase : x[1] ) ) UpperCamelCase__ = {v: k for k, v in self.encoder.items()} UpperCamelCase__ = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token ) @property def _lowerCamelCase ( self ): return self.encoder[self.bod_token] @property def _lowerCamelCase ( self ): return self.encoder[self.eod_token] @property def _lowerCamelCase ( self ): return self.encoder["\n"] @property def _lowerCamelCase ( self ): return len(self.encoder ) def _lowerCamelCase ( self ): return dict(self.encoder , **self.added_tokens_encoder ) def _lowerCamelCase ( self , 
__lowerCAmelCase ): UpperCamelCase__ = [] for x in jieba.cut(__lowerCAmelCase , cut_all=__lowerCAmelCase ): output_tokens.extend(self.wordpiece_tokenizer.tokenize(__lowerCAmelCase ) ) return output_tokens def _lowerCamelCase ( self , __lowerCAmelCase , **__lowerCAmelCase ): UpperCamelCase__ = [i for i in token_ids if i >= 0] UpperCamelCase__ = [ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(__lowerCAmelCase , **__lowerCAmelCase ) def _lowerCamelCase ( self , __lowerCAmelCase ): return token in self.encoder def _lowerCamelCase ( self , __lowerCAmelCase ): return "".join(__lowerCAmelCase ) def _lowerCamelCase ( self , __lowerCAmelCase ): return self.encoder.get(__lowerCAmelCase , self.encoder.get(self.unk_token ) ) def _lowerCamelCase ( self , __lowerCAmelCase ): return self.decoder.get(__lowerCAmelCase , self.unk_token ) def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None ): if os.path.isdir(__lowerCAmelCase ): UpperCamelCase__ = os.path.join( __lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) else: UpperCamelCase__ = (filename_prefix + """-""" if filename_prefix else """""") + save_directory UpperCamelCase__ = 0 if " " in self.encoder: UpperCamelCase__ = self.encoder[""" """] del self.encoder[" "] if "\n" in self.encoder: UpperCamelCase__ = self.encoder["""\n"""] del self.encoder["\n"] UpperCamelCase__ = collections.OrderedDict(sorted(self.encoder.items() , key=lambda __lowerCAmelCase : x[1] ) ) with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.""" """ Please check that the vocabulary is not corrupted!""" ) UpperCamelCase__ = token_index writer.write(token + """\n""" ) index += 1 return (vocab_file,) def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None ): if token_ids_a is None: return [self.bos_token_id] + token_ids_a return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase ) if token_ids_a is not None: return [1] + ([0] * len(__lowerCAmelCase )) + [1] + ([0] * len(__lowerCAmelCase )) return [1] + ([0] * len(__lowerCAmelCase ))
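# Editor's sketch (added; toy vocabulary, hypothetical names): the
# WordpieceTokenizer above performs a greedy longest-match scan, shrinking the
# candidate window from the right until a vocabulary entry matches.
def greedy_longest_match(chars: str, vocab: set, unk: str = "<unk>") -> list:
    tokens, start = [], 0
    while start < len(chars):
        end, cur = len(chars), None
        while start < end:
            if chars[start:end] in vocab:
                cur = chars[start:end]
                break
            end -= 1
        if cur is None:
            tokens.append(unk)  # no prefix matched: emit <unk> and advance one char
            start += 1
        else:
            tokens.append(cur)
            start = end
    return tokens


assert greedy_longest_match("unhappy", {"un", "happy", "hap"}) == ["un", "happy"]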
87
0
import argparse from torch import nn # transformers_old should correspond to branch `save_old_prophetnet_model_structure` here # original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively from transformers_old.modeling_prophetnet import ( ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld, ) from transformers_old.modeling_xlm_prophetnet import ( XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld, ) from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging _A = logging.get_logger(__name__) logging.set_verbosity_info() def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ): if "xprophetnet" in prophetnet_checkpoint_path: __UpperCamelCase =XLMProphetNetForConditionalGenerationOld.from_pretrained(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase , __UpperCamelCase =XLMProphetNetForConditionalGeneration.from_pretrained( SCREAMING_SNAKE_CASE__ , output_loading_info=SCREAMING_SNAKE_CASE__ ) else: __UpperCamelCase =ProphetNetForConditionalGenerationOld.from_pretrained(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase , __UpperCamelCase =ProphetNetForConditionalGeneration.from_pretrained( SCREAMING_SNAKE_CASE__ , output_loading_info=SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =['key_proj', 'value_proj', 'query_proj'] __UpperCamelCase ={ 'self_attn': 'ngram_self_attn', 'cross_attn': 'encoder_attn', 'cross_attn_layer_norm': 'encoder_attn_layer_norm', 'feed_forward_layer_norm': 'final_layer_norm', 'feed_forward': '', 'intermediate': 'fc1', 'output': 'fc2', 'key_proj': 'k_proj', 'query_proj': 'q_proj', 'value_proj': 'v_proj', 'word_embeddings': 'embed_tokens', 'embeddings_layer_norm': 'emb_layer_norm', 'relative_pos_embeddings': 'relative_linear', 'ngram_embeddings': 'ngram_input_embed', 'position_embeddings': 'embed_positions', } for key in loading_info["missing_keys"]: __UpperCamelCase =key.split('.' ) if attributes[0] == "lm_head": __UpperCamelCase =prophet __UpperCamelCase =prophet_old else: __UpperCamelCase =prophet.prophetnet __UpperCamelCase =prophet_old.model __UpperCamelCase =False for attribute in attributes: if attribute in mapping: __UpperCamelCase =mapping[attribute] if not hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and len(SCREAMING_SNAKE_CASE__ ) > 0: __UpperCamelCase =attribute elif hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): __UpperCamelCase =attribute if attribute == "weight": assert old_model.weight.shape == model.weight.shape, "Shapes have to match!" __UpperCamelCase =old_model.weight logger.info(F'{attribute} is initialized.' ) __UpperCamelCase =True break elif attribute == "bias": assert old_model.bias.shape == model.bias.shape, "Shapes have to match!" 
__UpperCamelCase =old_model.bias logger.info(F'{attribute} is initialized' ) __UpperCamelCase =True break elif attribute in special_keys and hasattr(SCREAMING_SNAKE_CASE__ , 'in_proj_weight' ): __UpperCamelCase =old_model.in_proj_weight.shape[0] // 3 __UpperCamelCase =getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match" param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match" if attribute == "query_proj": __UpperCamelCase =nn.Parameter(old_model.in_proj_weight[:embed_dim, :] ) __UpperCamelCase =nn.Parameter(old_model.in_proj_bias[:embed_dim] ) elif attribute == "key_proj": __UpperCamelCase =nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] ) __UpperCamelCase =nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] ) elif attribute == "value_proj": __UpperCamelCase =nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] ) __UpperCamelCase =nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] ) __UpperCamelCase =True break elif attribute == "position_embeddings": assert ( model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1] ), "Hidden size has to match" assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings." __UpperCamelCase =nn.Parameter(old_model.embed_positions.weight[:5_12, :] ) __UpperCamelCase =True break if attribute.isdigit(): __UpperCamelCase =model[int(SCREAMING_SNAKE_CASE__ )] __UpperCamelCase =old_model[int(SCREAMING_SNAKE_CASE__ )] else: __UpperCamelCase =getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if old_attribute == "": __UpperCamelCase =old_model else: if not hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): raise ValueError(F'{old_model} does not have {old_attribute}' ) __UpperCamelCase =getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if not is_key_init: raise ValueError(F'{key} was not correctly initialized!' ) print(F'Saving model to {pytorch_dump_folder_path}' ) prophet.save_pretrained(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": _A = argparse.ArgumentParser() # Required parameters parser.add_argument( '--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) _A = parser.parse_args() convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
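# Editor's sketch (added): the in_proj split performed above, isolated. A fused
# attention projection of shape (3 * embed_dim, embed_dim) is sliced into
# separate query / key / value weights.
import torch
from torch import nn

embed_dim = 4
in_proj_weight = torch.randn(3 * embed_dim, embed_dim)
q_w = nn.Parameter(in_proj_weight[:embed_dim, :])
k_w = nn.Parameter(in_proj_weight[embed_dim : 2 * embed_dim, :])
v_w = nn.Parameter(in_proj_weight[2 * embed_dim :, :])
assert q_w.shape == k_w.shape == v_w.shape == (embed_dim, embed_dim)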
62
"""simple docstring""" from __future__ import annotations class _A : """simple docstring""" def __init__( self : List[str] , __UpperCAmelCase : int = 0): a : Tuple = key def __snake_case ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : int): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : Dict = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(__UpperCAmelCase) ^ key) for ch in content] def __snake_case ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : int): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : Optional[Any] = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(__UpperCAmelCase) ^ key) for ch in content] def __snake_case ( self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : int = 0): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : List[Any] = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned a : Any = "" for ch in content: ans += chr(ord(__UpperCAmelCase) ^ key) return ans def __snake_case ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : int = 0): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : Dict = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned a : str = "" for ch in content: ans += chr(ord(__UpperCAmelCase) ^ key) return ans def __snake_case ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : int = 0): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) try: with open(__UpperCAmelCase) as fin, open("encrypt.out" , "w+") as fout: # actual encrypt-process for line in fin: fout.write(self.encrypt_string(__UpperCAmelCase , __UpperCAmelCase)) except OSError: return False return True def __snake_case ( self : Any , __UpperCAmelCase : str , __UpperCAmelCase : int): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) try: with open(__UpperCAmelCase) as fin, open("decrypt.out" , "w+") as fout: # actual encrypt-process for line in fin: fout.write(self.decrypt_string(__UpperCAmelCase , __UpperCAmelCase)) except OSError: return False return True # Tests # crypt = XORCipher() # key = 67 # # test encrypt # print(crypt.encrypt("hallo welt",key)) # # test decrypt # print(crypt.decrypt(crypt.encrypt("hallo welt",key), key)) # # test encrypt_string # print(crypt.encrypt_string("hallo welt",key)) # # test decrypt_string # print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key)) # if (crypt.encrypt_file("test.txt",key)): # print("encrypt successful") # else: # print("encrypt unsuccessful") # if (crypt.decrypt_file("encrypt.out",key)): # print("decrypt successful") # else: # print("decrypt unsuccessful")
40
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_efficientformer": [
        "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientFormerConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_efficientformer"] = [
        "EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientFormerForImageClassification",
        "EfficientFormerForImageClassificationWithTeacher",
        "EfficientFormerModel",
        "EfficientFormerPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
        "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFEfficientFormerForImageClassification",
        "TFEfficientFormerForImageClassificationWithTeacher",
        "TFEfficientFormerModel",
        "TFEfficientFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientformer import EfficientFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientformer import (
            EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientFormerForImageClassification,
            EfficientFormerForImageClassificationWithTeacher,
            EfficientFormerModel,
            EfficientFormerPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_efficientformer import (
            TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFEfficientFormerForImageClassification,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerModel,
            TFEfficientFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
131
def power(base: int, exponent: int) -> float:
    return base * power(base, exponent - 1) if exponent else 1


if __name__ == "__main__":
    print("Raise base to the power of exponent using recursion...")
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f"{base} to the power of {exponent} is {result}")
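The recursion above performs one multiplication per unit of the exponent, i.e. O(exponent) calls. A short sketch of exponentiation by squaring (not part of the original snippet) brings this down to O(log exponent):

# Not from the source: exponentiation by squaring as a faster alternative.
def fast_power(base: float, exponent: int) -> float:
    if exponent == 0:
        return 1
    half = fast_power(base, exponent // 2)  # reuse the half-exponent result
    return half * half * (base if exponent % 2 else 1)

# fast_power(2, 10) == 1024, using 4 recursive calls instead of 10.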
131
1
from __future__ import annotations

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        # Manhattan distance to the goal
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2

        for elem in grid:
            print(elem)
29
from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .attention_processor import AttentionProcessor, AttnProcessor from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder @dataclass class lowerCamelCase (_snake_case ): '''simple docstring''' _snake_case : "DiagonalGaussianDistribution" class lowerCamelCase (_snake_case , _snake_case ): '''simple docstring''' _snake_case : Optional[int] = True @register_to_config def __init__( self , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = ("DownEncoderBlock2D",) , _UpperCamelCase = ("UpDecoderBlock2D",) , _UpperCamelCase = (6_4,) , _UpperCamelCase = 1 , _UpperCamelCase = "silu" , _UpperCamelCase = 4 , _UpperCamelCase = 3_2 , _UpperCamelCase = 3_2 , _UpperCamelCase = 0.1_82_15 , ) -> List[Any]: super().__init__() # pass init params to Encoder UpperCAmelCase_ : List[str] = Encoder( in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , down_block_types=_UpperCamelCase , block_out_channels=_UpperCamelCase , layers_per_block=_UpperCamelCase , act_fn=_UpperCamelCase , norm_num_groups=_UpperCamelCase , double_z=_UpperCamelCase , ) # pass init params to Decoder UpperCAmelCase_ : Dict = Decoder( in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , up_block_types=_UpperCamelCase , block_out_channels=_UpperCamelCase , layers_per_block=_UpperCamelCase , norm_num_groups=_UpperCamelCase , act_fn=_UpperCamelCase , ) UpperCAmelCase_ : Any = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 ) UpperCAmelCase_ : List[Any] = nn.Convad(_UpperCamelCase , _UpperCamelCase , 1 ) UpperCAmelCase_ : Any = False UpperCAmelCase_ : int = False # only relevant if vae tiling is enabled UpperCAmelCase_ : Optional[int] = self.config.sample_size UpperCAmelCase_ : int = ( self.config.sample_size[0] if isinstance(self.config.sample_size , (list, tuple) ) else self.config.sample_size ) UpperCAmelCase_ : Union[str, Any] = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) ) UpperCAmelCase_ : Optional[Any] = 0.25 def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=False ) -> List[str]: if isinstance(_UpperCamelCase , (Encoder, Decoder) ): UpperCAmelCase_ : Union[str, Any] = value def __UpperCAmelCase ( self , _UpperCamelCase = True ) -> int: UpperCAmelCase_ : Tuple = use_tiling def __UpperCAmelCase ( self ) -> Dict: self.enable_tiling(_UpperCamelCase ) def __UpperCAmelCase ( self ) -> Optional[Any]: UpperCAmelCase_ : str = True def __UpperCAmelCase ( self ) -> List[Any]: UpperCAmelCase_ : Optional[int] = False @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def __UpperCAmelCase ( self ) -> Dict[str, AttentionProcessor]: UpperCAmelCase_ : Optional[int] = {} def fn_recursive_add_processors(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): if hasattr(_UpperCamelCase , 'set_processor' ): UpperCAmelCase_ : Optional[int] = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}" , _UpperCamelCase , _UpperCamelCase ) return processors for name, module in self.named_children(): fn_recursive_add_processors(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) return processors def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[Any]: UpperCAmelCase_ : Union[str, Any] = 
len(self.attn_processors.keys() ) if isinstance(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(_UpperCamelCase )} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): if hasattr(_UpperCamelCase , 'set_processor' ): if not isinstance(_UpperCamelCase , _UpperCamelCase ): module.set_processor(_UpperCamelCase ) else: module.set_processor(processor.pop(f"{name}.processor" ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}" , _UpperCamelCase , _UpperCamelCase ) for name, module in self.named_children(): fn_recursive_attn_processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) def __UpperCAmelCase ( self ) -> Union[str, Any]: self.set_attn_processor(AttnProcessor() ) @apply_forward_hook def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> AutoencoderKLOutput: if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size): return self.tiled_encode(_UpperCamelCase , return_dict=_UpperCamelCase ) if self.use_slicing and x.shape[0] > 1: UpperCAmelCase_ : Union[str, Any] = [self.encoder(_UpperCamelCase ) for x_slice in x.split(1 )] UpperCAmelCase_ : Tuple = torch.cat(_UpperCamelCase ) else: UpperCAmelCase_ : List[Any] = self.encoder(_UpperCamelCase ) UpperCAmelCase_ : Optional[Any] = self.quant_conv(_UpperCamelCase ) UpperCAmelCase_ : Tuple = DiagonalGaussianDistribution(_UpperCamelCase ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]: if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size): return self.tiled_decode(_UpperCamelCase , return_dict=_UpperCamelCase ) UpperCAmelCase_ : str = self.post_quant_conv(_UpperCamelCase ) UpperCAmelCase_ : List[str] = self.decoder(_UpperCamelCase ) if not return_dict: return (dec,) return DecoderOutput(sample=_UpperCamelCase ) @apply_forward_hook def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]: if self.use_slicing and z.shape[0] > 1: UpperCAmelCase_ : List[str] = [self._decode(_UpperCamelCase ).sample for z_slice in z.split(1 )] UpperCAmelCase_ : Dict = torch.cat(_UpperCamelCase ) else: UpperCAmelCase_ : Any = self._decode(_UpperCamelCase ).sample if not return_dict: return (decoded,) return DecoderOutput(sample=_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any: UpperCAmelCase_ : Tuple = min(a.shape[2] , b.shape[2] , _UpperCamelCase ) for y in range(_UpperCamelCase ): UpperCAmelCase_ : str = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent) return b def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict: UpperCAmelCase_ : Tuple = min(a.shape[3] , b.shape[3] , _UpperCamelCase ) for x in range(_UpperCamelCase ): UpperCAmelCase_ : int = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent) return b def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> AutoencoderKLOutput: UpperCAmelCase_ : Any = 
int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) ) UpperCAmelCase_ : Tuple = int(self.tile_latent_min_size * self.tile_overlap_factor ) UpperCAmelCase_ : Optional[int] = self.tile_latent_min_size - blend_extent # Split the image into 512x512 tiles and encode them separately. UpperCAmelCase_ : List[str] = [] for i in range(0 , x.shape[2] , _UpperCamelCase ): UpperCAmelCase_ : Any = [] for j in range(0 , x.shape[3] , _UpperCamelCase ): UpperCAmelCase_ : Any = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size] UpperCAmelCase_ : Dict = self.encoder(_UpperCamelCase ) UpperCAmelCase_ : List[str] = self.quant_conv(_UpperCamelCase ) row.append(_UpperCamelCase ) rows.append(_UpperCamelCase ) UpperCAmelCase_ : str = [] for i, row in enumerate(_UpperCamelCase ): UpperCAmelCase_ : List[Any] = [] for j, tile in enumerate(_UpperCamelCase ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: UpperCAmelCase_ : Dict = self.blend_v(rows[i - 1][j] , _UpperCamelCase , _UpperCamelCase ) if j > 0: UpperCAmelCase_ : List[str] = self.blend_h(row[j - 1] , _UpperCamelCase , _UpperCamelCase ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(_UpperCamelCase , dim=3 ) ) UpperCAmelCase_ : Union[str, Any] = torch.cat(_UpperCamelCase , dim=2 ) UpperCAmelCase_ : List[Any] = DiagonalGaussianDistribution(_UpperCamelCase ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]: UpperCAmelCase_ : str = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) ) UpperCAmelCase_ : Dict = int(self.tile_sample_min_size * self.tile_overlap_factor ) UpperCAmelCase_ : Dict = self.tile_sample_min_size - blend_extent # Split z into overlapping 64x64 tiles and decode them separately. # The tiles have an overlap to avoid seams between tiles. 
UpperCAmelCase_ : Union[str, Any] = [] for i in range(0 , z.shape[2] , _UpperCamelCase ): UpperCAmelCase_ : List[str] = [] for j in range(0 , z.shape[3] , _UpperCamelCase ): UpperCAmelCase_ : List[str] = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size] UpperCAmelCase_ : Optional[Any] = self.post_quant_conv(_UpperCamelCase ) UpperCAmelCase_ : Tuple = self.decoder(_UpperCamelCase ) row.append(_UpperCamelCase ) rows.append(_UpperCamelCase ) UpperCAmelCase_ : Optional[Any] = [] for i, row in enumerate(_UpperCamelCase ): UpperCAmelCase_ : List[Any] = [] for j, tile in enumerate(_UpperCamelCase ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: UpperCAmelCase_ : Union[str, Any] = self.blend_v(rows[i - 1][j] , _UpperCamelCase , _UpperCamelCase ) if j > 0: UpperCAmelCase_ : Optional[Any] = self.blend_h(row[j - 1] , _UpperCamelCase , _UpperCamelCase ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(_UpperCamelCase , dim=3 ) ) UpperCAmelCase_ : Dict = torch.cat(_UpperCamelCase , dim=2 ) if not return_dict: return (dec,) return DecoderOutput(sample=_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = False , _UpperCamelCase = True , _UpperCamelCase = None , ) -> Union[DecoderOutput, torch.FloatTensor]: UpperCAmelCase_ : Optional[Any] = sample UpperCAmelCase_ : Union[str, Any] = self.encode(_UpperCamelCase ).latent_dist if sample_posterior: UpperCAmelCase_ : str = posterior.sample(generator=_UpperCamelCase ) else: UpperCAmelCase_ : int = posterior.mode() UpperCAmelCase_ : Dict = self.decode(_UpperCamelCase ).sample if not return_dict: return (dec,) return DecoderOutput(sample=_UpperCamelCase )
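A minimal usage sketch for the tiled variational autoencoder above. It assumes the class is exposed under diffusers' usual `AutoencoderKL` name and default configuration (assumptions; the snippet itself does not show its export name or any instantiation).

# Hedged sketch, not from the source: exercises encode/decode with tiling enabled.
import torch
from diffusers import AutoencoderKL  # assumed export name

vae = AutoencoderKL()  # default config from the constructor above
vae.enable_tiling()    # inputs larger than tile_sample_min_size go through tiled_encode

x = torch.randn(1, 3, 512, 512)  # (batch, channels, height, width)
with torch.no_grad():
    posterior = vae.encode(x).latent_dist   # a DiagonalGaussianDistribution
    z = posterior.sample()
    reconstruction = vae.decode(z).sample   # tensor with x's spatial size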
29
1
'''simple docstring''' import os import sys import unittest a : Tuple = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, 'utils')) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path a : Any = os.path.join(git_repo_path, 'src', 'diffusers') class a ( unittest.TestCase ): def A_ ( self : Optional[Any] ): snake_case_ = find_backend(''' if not is_torch_available():''' ) self.assertEqual(lowercase_ , '''torch''' ) # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") snake_case_ = find_backend(''' if not (is_torch_available() and is_transformers_available()):''' ) self.assertEqual(lowercase_ , '''torch_and_transformers''' ) # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") snake_case_ = find_backend( ''' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):''' ) self.assertEqual(lowercase_ , '''torch_and_transformers_and_onnx''' ) def A_ ( self : int ): snake_case_ = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn('''torch''' , lowercase_ ) self.assertIn('''torch_and_transformers''' , lowercase_ ) self.assertIn('''flax_and_transformers''' , lowercase_ ) self.assertIn('''torch_and_transformers_and_onnx''' , lowercase_ ) # Likewise, we can't assert on the exact content of a key self.assertIn('''UNet2DModel''' , objects['''torch'''] ) self.assertIn('''FlaxUNet2DConditionModel''' , objects['''flax'''] ) self.assertIn('''StableDiffusionPipeline''' , objects['''torch_and_transformers'''] ) self.assertIn('''FlaxStableDiffusionPipeline''' , objects['''flax_and_transformers'''] ) self.assertIn('''LMSDiscreteScheduler''' , objects['''torch_and_scipy'''] ) self.assertIn('''OnnxStableDiffusionPipeline''' , objects['''torch_and_transformers_and_onnx'''] ) def A_ ( self : Any ): snake_case_ = create_dummy_object('''CONSTANT''' , '''\'torch\'''' ) self.assertEqual(lowercase_ , '''\nCONSTANT = None\n''' ) snake_case_ = create_dummy_object('''function''' , '''\'torch\'''' ) self.assertEqual( lowercase_ , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' ) snake_case_ = ''' class FakeClass(metaclass=DummyObject): _backends = \'torch\' def __init__(self, *args, **kwargs): requires_backends(self, \'torch\') @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, \'torch\') @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, \'torch\') ''' snake_case_ = create_dummy_object('''FakeClass''' , '''\'torch\'''' ) self.assertEqual(lowercase_ , lowercase_ ) def A_ ( self : Any ): snake_case_ = '''# This file is autogenerated by the command `make fix-copies`, do not edit. 
from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, ["torch"]) class FakeClass(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) ''' snake_case_ = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} ) self.assertEqual(dummy_files['''torch'''] , lowercase_ )
72
'''simple docstring''' import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version('>=', FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType a : Union[str, Any] = get_logger(__name__) def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase=0 ) -> Tuple: '''simple docstring''' os.makedirs(__UpperCAmelCase, exist_ok=__UpperCAmelCase ) with FSDP.state_dict_type( __UpperCAmelCase, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ): snake_case_ = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: snake_case_ = F"{MODEL_NAME}.bin" if model_index == 0 else F"{MODEL_NAME}_{model_index}.bin" snake_case_ = os.path.join(__UpperCAmelCase, __UpperCAmelCase ) if accelerator.process_index == 0: logger.info(F"Saving model to {output_model_file}" ) torch.save(__UpperCAmelCase, __UpperCAmelCase ) logger.info(F"Model saved to {output_model_file}" ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: snake_case_ = ( F"{MODEL_NAME}_rank{accelerator.process_index}.bin" if model_index == 0 else F"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin" ) snake_case_ = os.path.join(__UpperCAmelCase, __UpperCAmelCase ) logger.info(F"Saving model to {output_model_file}" ) torch.save(__UpperCAmelCase, __UpperCAmelCase ) logger.info(F"Model saved to {output_model_file}" ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: snake_case_ = os.path.join(__UpperCAmelCase, F"{MODEL_NAME}_{model_index}" ) os.makedirs(__UpperCAmelCase, exist_ok=__UpperCAmelCase ) logger.info(F"Saving model to {ckpt_dir}" ) snake_case_ = {'''model''': state_dict} dist_cp.save_state_dict( state_dict=__UpperCAmelCase, storage_writer=dist_cp.FileSystemWriter(__UpperCAmelCase ), planner=DefaultSavePlanner(), ) logger.info(F"Model saved to {ckpt_dir}" ) def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase=0 ) -> str: '''simple docstring''' accelerator.wait_for_everyone() with FSDP.state_dict_type( __UpperCAmelCase, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(__UpperCAmelCase ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( '''Set the `sync_module_states` flag to `True` so that model states are synced across processes when ''' '''initializing FSDP object''' ) return snake_case_ = F"{MODEL_NAME}.bin" if model_index == 0 else F"{MODEL_NAME}_{model_index}.bin" snake_case_ = os.path.join(__UpperCAmelCase, __UpperCAmelCase ) logger.info(F"Loading model from {input_model_file}" ) snake_case_ = torch.load(__UpperCAmelCase ) logger.info(F"Model loaded from {input_model_file}" ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: snake_case_ = ( F"{MODEL_NAME}_rank{accelerator.process_index}.bin" if model_index == 0 else 
F"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin" ) snake_case_ = os.path.join(__UpperCAmelCase, __UpperCAmelCase ) logger.info(F"Loading model from {input_model_file}" ) snake_case_ = torch.load(__UpperCAmelCase ) logger.info(F"Model loaded from {input_model_file}" ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: snake_case_ = ( os.path.join(__UpperCAmelCase, F"{MODEL_NAME}_{model_index}" ) if F"{MODEL_NAME}" not in input_dir else input_dir ) logger.info(F"Loading model from {ckpt_dir}" ) snake_case_ = {'''model''': model.state_dict()} dist_cp.load_state_dict( state_dict=__UpperCAmelCase, storage_reader=dist_cp.FileSystemReader(__UpperCAmelCase ), planner=DefaultLoadPlanner(), ) snake_case_ = state_dict['''model'''] logger.info(F"Model loaded from {ckpt_dir}" ) model.load_state_dict(__UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase=0 ) -> Dict: '''simple docstring''' os.makedirs(__UpperCAmelCase, exist_ok=__UpperCAmelCase ) with FSDP.state_dict_type( __UpperCAmelCase, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ): snake_case_ = FSDP.optim_state_dict(__UpperCAmelCase, __UpperCAmelCase ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: snake_case_ = ( F"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else F"{OPTIMIZER_NAME}_{optimizer_index}.bin" ) snake_case_ = os.path.join(__UpperCAmelCase, __UpperCAmelCase ) logger.info(F"Saving Optimizer state to {output_optimizer_file}" ) torch.save(__UpperCAmelCase, __UpperCAmelCase ) logger.info(F"Optimizer state saved in {output_optimizer_file}" ) else: snake_case_ = os.path.join(__UpperCAmelCase, F"{OPTIMIZER_NAME}_{optimizer_index}" ) os.makedirs(__UpperCAmelCase, exist_ok=__UpperCAmelCase ) logger.info(F"Saving Optimizer state to {ckpt_dir}" ) dist_cp.save_state_dict( state_dict={'''optimizer''': optim_state}, storage_writer=dist_cp.FileSystemWriter(__UpperCAmelCase ), planner=DefaultSavePlanner(), ) logger.info(F"Optimizer state saved in {ckpt_dir}" ) def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase=0 ) -> Union[str, Any]: '''simple docstring''' accelerator.wait_for_everyone() with FSDP.state_dict_type( __UpperCAmelCase, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: snake_case_ = None # below check should work but currently it isn't working (mostly opytorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: snake_case_ = ( F"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else F"{OPTIMIZER_NAME}_{optimizer_index}.bin" ) snake_case_ = os.path.join(__UpperCAmelCase, __UpperCAmelCase ) logger.info(F"Loading Optimizer state from {input_optimizer_file}" ) snake_case_ = torch.load(__UpperCAmelCase ) logger.info(F"Optimizer state loaded from {input_optimizer_file}" ) else: snake_case_ = ( os.path.join(__UpperCAmelCase, F"{OPTIMIZER_NAME}_{optimizer_index}" ) if F"{OPTIMIZER_NAME}" not in input_dir else input_dir ) logger.info(F"Loading Optimizer from {ckpt_dir}" ) snake_case_ = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict(), optimizer_key='''optimizer''', 
storage_reader=dist_cp.FileSystemReader(__UpperCAmelCase ), ) snake_case_ = optim_state['''optimizer'''] logger.info(F"Optimizer loaded from {ckpt_dir}" ) snake_case_ = FSDP.optim_state_dict_to_load(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) optimizer.load_state_dict(__UpperCAmelCase )
72
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging snake_case__ : int = logging.get_logger(__name__) snake_case__ : List[str] = { '''facebook/data2vec-vision-base-ft''': ( '''https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json''' ), } class snake_case_( a__ ): __UpperCamelCase = '''data2vec-vision''' def __init__( self : Any , UpperCamelCase_ : Any=7_6_8 , UpperCamelCase_ : Tuple=1_2 , UpperCamelCase_ : Any=1_2 , UpperCamelCase_ : List[str]=3_0_7_2 , UpperCamelCase_ : Dict="gelu" , UpperCamelCase_ : List[Any]=0.0 , UpperCamelCase_ : str=0.0 , UpperCamelCase_ : List[str]=0.02 , UpperCamelCase_ : str=1E-12 , UpperCamelCase_ : Dict=2_2_4 , UpperCamelCase_ : List[str]=1_6 , UpperCamelCase_ : List[Any]=3 , UpperCamelCase_ : List[str]=False , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : Union[str, Any]=False , UpperCamelCase_ : List[str]=False , UpperCamelCase_ : Optional[Any]=0.1 , UpperCamelCase_ : Optional[Any]=0.1 , UpperCamelCase_ : Any=True , UpperCamelCase_ : str=[3, 5, 7, 1_1] , UpperCamelCase_ : Dict=[1, 2, 3, 6] , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : Any=0.4 , UpperCamelCase_ : str=2_5_6 , UpperCamelCase_ : Dict=1 , UpperCamelCase_ : Optional[Any]=False , UpperCamelCase_ : str=2_5_5 , **UpperCamelCase_ : Optional[Any] , ): super().__init__(**UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = hidden_size lowerCAmelCase : Optional[int] = num_hidden_layers lowerCAmelCase : Optional[int] = num_attention_heads lowerCAmelCase : List[str] = intermediate_size lowerCAmelCase : str = hidden_act lowerCAmelCase : Dict = hidden_dropout_prob lowerCAmelCase : Any = attention_probs_dropout_prob lowerCAmelCase : List[str] = initializer_range lowerCAmelCase : str = layer_norm_eps lowerCAmelCase : List[Any] = image_size lowerCAmelCase : Dict = patch_size lowerCAmelCase : Tuple = num_channels lowerCAmelCase : List[Any] = use_mask_token lowerCAmelCase : List[str] = use_absolute_position_embeddings lowerCAmelCase : Optional[int] = use_relative_position_bias lowerCAmelCase : List[str] = use_shared_relative_position_bias lowerCAmelCase : Tuple = layer_scale_init_value lowerCAmelCase : int = drop_path_rate lowerCAmelCase : int = use_mean_pooling # decode head attributes (semantic segmentation) lowerCAmelCase : Optional[int] = out_indices lowerCAmelCase : Optional[int] = pool_scales # auxiliary head attributes (semantic segmentation) lowerCAmelCase : Tuple = use_auxiliary_head lowerCAmelCase : Optional[Any] = auxiliary_loss_weight lowerCAmelCase : List[Any] = auxiliary_channels lowerCAmelCase : Dict = auxiliary_num_convs lowerCAmelCase : List[str] = auxiliary_concat_input lowerCAmelCase : int = semantic_loss_ignore_index class snake_case_( a__ ): __UpperCamelCase = version.parse('''1.11''' ) @property def lowerCamelCase__ ( self : str ): return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def lowerCamelCase__ ( self : str ): return 1E-4
60
"""simple docstring""" from __future__ import annotations def a__ ( snake_case__ , snake_case__ ) -> bool: if len(snake_case__ ) == 0: return False lowerCamelCase = len(snake_case__ ) // 2 if a_list[midpoint] == item: return True if item < a_list[midpoint]: return binary_search(a_list[:midpoint] , snake_case__ ) else: return binary_search(a_list[midpoint + 1 :] , snake_case__ ) if __name__ == "__main__": lowerCAmelCase : List[Any] = input("""Enter numbers separated by comma:\n""").strip() lowerCAmelCase : Optional[Any] = [int(item.strip()) for item in user_input.split(""",""")] lowerCAmelCase : Optional[int] = int(input("""Enter the number to be found in the list:\n""").strip()) lowerCAmelCase : Union[str, Any] = """""" if binary_search(sequence, target) else """not """ print(F"""{target} was {not_str}found in {sequence}""")
291
0
"""simple docstring""" import warnings from ...utils import logging from .image_processing_mobilevit import MobileViTImageProcessor UpperCAmelCase = logging.get_logger(__name__) class UpperCAmelCase_ ( _UpperCAmelCase): def __init__( self : str , *__UpperCamelCase : int , **__UpperCamelCase : Tuple ) -> Any: warnings.warn( '''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use MobileViTImageProcessor instead.''' , _UpperCAmelCase , ) super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
352
"""simple docstring""" import qiskit def lowercase ( a__ : int = 2 ) -> qiskit.result.counts.Counts: _UpperCamelCase = qubits # Using Aer's simulator _UpperCamelCase = qiskit.Aer.get_backend('''aer_simulator''' ) # Creating a Quantum Circuit acting on the q register _UpperCamelCase = qiskit.QuantumCircuit(a__ , a__ ) # Adding a H gate on qubit 0 (now q0 in superposition) circuit.h(0 ) for i in range(1 , a__ ): # Adding CX (CNOT) gate circuit.cx(i - 1 , a__ ) # Mapping the quantum measurement to the classical bits circuit.measure(list(range(a__ ) ) , list(range(a__ ) ) ) # Now measuring any one qubit would affect other qubits to collapse # their super position and have same state as the measured one. # Executing the circuit on the simulator _UpperCamelCase = qiskit.execute(a__ , a__ , shots=1000 ) return job.result().get_counts(a__ ) if __name__ == "__main__": print(F'''Total count for various states are: {quantum_entanglement(3)}''')
54
0
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.local_sgd import LocalSGD ######################################################################## # This is a fully working simple example to use Accelerate # with LocalSGD, which is a method to synchronize model # parameters every K batches. It is different, but complementary # to gradient accumulation. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __UpperCAmelCase = 16 __UpperCAmelCase = 32 def lowercase__ ( __snake_case : Accelerator , __snake_case : int = 16 ): '''simple docstring''' UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained('bert-base-cased' ) UpperCAmelCase_ : Any = load_dataset('glue' , 'mrpc' ) def tokenize_function(__snake_case : Tuple ): # max_length=None => use the model max length (it's actually the default) UpperCAmelCase_ : Union[str, Any] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=__snake_case , max_length=__snake_case ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): UpperCAmelCase_ : List[Any] = datasets.map( __snake_case , batched=__snake_case , remove_columns=['idx', 'sentence1', 'sentence2'] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library UpperCAmelCase_ : List[str] = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(__snake_case : Dict ): # On TPU it's best to pad everything to the same length or training will be very slow. UpperCAmelCase_ : Optional[int] = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": UpperCAmelCase_ : int = 16 elif accelerator.mixed_precision != "no": UpperCAmelCase_ : Union[str, Any] = 8 else: UpperCAmelCase_ : List[str] = None return tokenizer.pad( __snake_case , padding='longest' , max_length=__snake_case , pad_to_multiple_of=__snake_case , return_tensors='pt' , ) # Instantiate dataloaders. 
UpperCAmelCase_ : int = DataLoader( tokenized_datasets['train'] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case ) UpperCAmelCase_ : Tuple = DataLoader( tokenized_datasets['validation'] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1": from accelerate.test_utils.training import mocked_dataloaders __UpperCAmelCase = mocked_dataloaders # noqa: F811 def lowercase__ ( __snake_case : List[str] , __snake_case : Optional[int] ): '''simple docstring''' if os.environ.get('TESTING_MOCKED_DATALOADERS' , __snake_case ) == "1": UpperCAmelCase_ : Optional[int] = 2 # New Code # UpperCAmelCase_ : int = int(args.gradient_accumulation_steps ) UpperCAmelCase_ : List[Any] = int(args.local_sgd_steps ) # Initialize accelerator UpperCAmelCase_ : Dict = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__snake_case ) if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]: raise NotImplementedError('LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)' ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs UpperCAmelCase_ : List[str] = config['lr'] UpperCAmelCase_ : str = int(config['num_epochs'] ) UpperCAmelCase_ : str = int(config['seed'] ) UpperCAmelCase_ : str = int(config['batch_size'] ) UpperCAmelCase_ : List[str] = evaluate.load('glue' , 'mrpc' ) set_seed(__snake_case ) UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = get_dataloaders(__snake_case , __snake_case ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) UpperCAmelCase_ : Optional[Any] = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=__snake_case ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). UpperCAmelCase_ : Any = model.to(accelerator.device ) # Instantiate optimizer UpperCAmelCase_ : Optional[Any] = AdamW(params=model.parameters() , lr=__snake_case ) # Instantiate scheduler UpperCAmelCase_ : Tuple = get_linear_schedule_with_warmup( optimizer=__snake_case , num_warmup_steps=100 , num_training_steps=(len(__snake_case ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = accelerator.prepare( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) # Now we train the model for epoch in range(__snake_case ): model.train() with LocalSGD( accelerator=__snake_case , model=__snake_case , local_sgd_steps=__snake_case , enabled=local_sgd_steps is not None ) as local_sgd: for step, batch in enumerate(__snake_case ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. with accelerator.accumulate(__snake_case ): UpperCAmelCase_ : str = model(**__snake_case ) UpperCAmelCase_ : List[str] = output.loss accelerator.backward(__snake_case ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # LocalSGD-specific line local_sgd.step() model.eval() for step, batch in enumerate(__snake_case ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): UpperCAmelCase_ : Any = model(**__snake_case ) UpperCAmelCase_ : Union[str, Any] = outputs.logits.argmax(dim=-1 ) UpperCAmelCase_ , UpperCAmelCase_ : str = accelerator.gather_for_metrics((predictions, batch['labels']) ) metric.add_batch( predictions=__snake_case , references=__snake_case , ) UpperCAmelCase_ : Tuple = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"epoch {epoch}:" , __snake_case ) def lowercase__ ( ): '''simple docstring''' UpperCAmelCase_ : Union[str, Any] = argparse.ArgumentParser(description='Simple example of training script.' ) parser.add_argument( '--mixed_precision' , type=__snake_case , default=__snake_case , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose' 'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.' 'and an Nvidia Ampere GPU.' , ) # New Code # parser.add_argument( '--gradient_accumulation_steps' , type=__snake_case , default=1 , help='The number of minibatches to be ran before gradients are accumulated.' , ) parser.add_argument( '--local_sgd_steps' , type=__snake_case , default=8 , help='Number of local SGD steps or None to disable local SGD' ) parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' ) UpperCAmelCase_ : Dict = parser.parse_args() UpperCAmelCase_ : List[Any] = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16} training_function(__snake_case , __snake_case ) if __name__ == "__main__": main()
29
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
    "tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_transfo_xl"] = [
        "TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AdaptiveEmbedding",
        "TransfoXLForSequenceClassification",
        "TransfoXLLMHeadModel",
        "TransfoXLModel",
        "TransfoXLPreTrainedModel",
        "load_tf_weights_in_transfo_xl",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
        "TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFAdaptiveEmbedding",
        "TFTransfoXLForSequenceClassification",
        "TFTransfoXLLMHeadModel",
        "TFTransfoXLMainLayer",
        "TFTransfoXLModel",
        "TFTransfoXLPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
    from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_transfo_xl import (
            TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            AdaptiveEmbedding,
            TransfoXLForSequenceClassification,
            TransfoXLLMHeadModel,
            TransfoXLModel,
            TransfoXLPreTrainedModel,
            load_tf_weights_in_transfo_xl,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_transfo_xl import (
            TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFAdaptiveEmbedding,
            TFTransfoXLForSequenceClassification,
            TFTransfoXLLMHeadModel,
            TFTransfoXLMainLayer,
            TFTransfoXLModel,
            TFTransfoXLPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
116
0
from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING _A = logging.get_logger(__name__) @add_end_docstrings(__SCREAMING_SNAKE_CASE ) class lowercase_ ( __SCREAMING_SNAKE_CASE ): def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): """simple docstring""" super().__init__(*__UpperCamelCase , **__UpperCamelCase ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING ) def lowerCamelCase_ ( self , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None ): """simple docstring""" UpperCamelCase_ = {} UpperCamelCase_ = {} if prompt is not None: UpperCamelCase_ = prompt if generate_kwargs is not None: UpperCamelCase_ = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: UpperCamelCase_ = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( """'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,""" """ please use only one""" ) UpperCamelCase_ = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self , __UpperCamelCase , **__UpperCamelCase ): """simple docstring""" return super().__call__(__UpperCamelCase , **__UpperCamelCase ) def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase=None ): """simple docstring""" UpperCamelCase_ = load_image(__UpperCamelCase ) if prompt is not None: if not isinstance(__UpperCamelCase , __UpperCamelCase ): raise ValueError( f'''Received an invalid text input, got - {type(__UpperCamelCase )} - but expected a single string. 
''' """Note also that one single text can be provided for conditional image to text generation.""" ) UpperCamelCase_ = self.model.config.model_type if model_type == "git": UpperCamelCase_ = self.image_processor(images=__UpperCamelCase , return_tensors=self.framework ) UpperCamelCase_ = self.tokenizer(text=__UpperCamelCase , add_special_tokens=__UpperCamelCase ).input_ids UpperCamelCase_ = [self.tokenizer.cls_token_id] + input_ids UpperCamelCase_ = torch.tensor(__UpperCamelCase ).unsqueeze(0 ) model_inputs.update({"""input_ids""": input_ids} ) elif model_type == "pix2struct": UpperCamelCase_ = self.image_processor(images=__UpperCamelCase , header_text=__UpperCamelCase , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation UpperCamelCase_ = self.image_processor(images=__UpperCamelCase , return_tensors=self.framework ) UpperCamelCase_ = self.tokenizer(__UpperCamelCase , return_tensors=self.framework ) model_inputs.update(__UpperCamelCase ) else: raise ValueError(f'''Model type {model_type} does not support conditional text generation''' ) else: UpperCamelCase_ = self.image_processor(images=__UpperCamelCase , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: UpperCamelCase_ = None return model_inputs def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase=None ): """simple docstring""" if ( "input_ids" in model_inputs and isinstance(model_inputs["""input_ids"""] , __UpperCamelCase ) and all(x is None for x in model_inputs["""input_ids"""] ) ): UpperCamelCase_ = None if generate_kwargs is None: UpperCamelCase_ = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. UpperCamelCase_ = model_inputs.pop(self.model.main_input_name ) UpperCamelCase_ = self.model.generate(__UpperCamelCase , **__UpperCamelCase , **__UpperCamelCase ) return model_outputs def lowerCamelCase_ ( self , __UpperCamelCase ): """simple docstring""" UpperCamelCase_ = [] for output_ids in model_outputs: UpperCamelCase_ = { """generated_text""": self.tokenizer.decode( __UpperCamelCase , skip_special_tokens=__UpperCamelCase , ) } records.append(__UpperCamelCase ) return records
360
from typing import List, Optional import numpy as np from ...processing_utils import ProcessorMixin from ...utils import to_numpy class lowercase_ ( __SCREAMING_SNAKE_CASE ): A__ : List[Any] = """EncodecFeatureExtractor""" A__ : Tuple = ("""T5Tokenizer""", """T5TokenizerFast""") def __init__( self , __UpperCamelCase , __UpperCamelCase ): """simple docstring""" super().__init__(__UpperCamelCase , __UpperCamelCase ) UpperCamelCase_ = self.feature_extractor UpperCamelCase_ = False def lowerCamelCase_ ( self , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=True ): """simple docstring""" return self.tokenizer.get_decoder_prompt_ids(task=__UpperCamelCase , language=__UpperCamelCase , no_timestamps=__UpperCamelCase ) def __call__( self , *__UpperCamelCase , **__UpperCamelCase ): """simple docstring""" if self._in_target_context_manager: return self.current_processor(*__UpperCamelCase , **__UpperCamelCase ) UpperCamelCase_ = kwargs.pop("""audio""" , __UpperCamelCase ) UpperCamelCase_ = kwargs.pop("""sampling_rate""" , __UpperCamelCase ) UpperCamelCase_ = kwargs.pop("""text""" , __UpperCamelCase ) if len(__UpperCamelCase ) > 0: UpperCamelCase_ = args[0] UpperCamelCase_ = args[1:] if audio is None and text is None: raise ValueError("""You need to specify either an `audio` or `text` input to process.""" ) if text is not None: UpperCamelCase_ = self.tokenizer(__UpperCamelCase , **__UpperCamelCase ) if audio is not None: UpperCamelCase_ = self.feature_extractor(__UpperCamelCase , *__UpperCamelCase , sampling_rate=__UpperCamelCase , **__UpperCamelCase ) if audio is None: return inputs elif text is None: return audio_inputs else: UpperCamelCase_ = audio_inputs["""input_values"""] if "padding_mask" in audio_inputs: UpperCamelCase_ = audio_inputs["""padding_mask"""] return inputs def lowerCamelCase_ ( self , *__UpperCamelCase , **__UpperCamelCase ): """simple docstring""" UpperCamelCase_ = kwargs.pop("""audio""" , __UpperCamelCase ) UpperCamelCase_ = kwargs.pop("""padding_mask""" , __UpperCamelCase ) if len(__UpperCamelCase ) > 0: UpperCamelCase_ = args[0] UpperCamelCase_ = args[1:] if audio_values is not None: return self._decode_audio(__UpperCamelCase , padding_mask=__UpperCamelCase ) else: return self.tokenizer.batch_decode(*__UpperCamelCase , **__UpperCamelCase ) def lowerCamelCase_ ( self , *__UpperCamelCase , **__UpperCamelCase ): """simple docstring""" return self.tokenizer.decode(*__UpperCamelCase , **__UpperCamelCase ) def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase = None ): """simple docstring""" UpperCamelCase_ = to_numpy(__UpperCamelCase ) UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = audio_values.shape if padding_mask is None: return list(__UpperCamelCase ) UpperCamelCase_ = to_numpy(__UpperCamelCase ) # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding** # token (so that the generated audio values are **not** treated as padded tokens) UpperCamelCase_ = seq_len - padding_mask.shape[-1] UpperCamelCase_ = 1 - self.feature_extractor.padding_value UpperCamelCase_ = np.pad(__UpperCamelCase , ((0, 0), (0, difference)) , """constant""" , constant_values=__UpperCamelCase ) UpperCamelCase_ = audio_values.tolist() for i in range(__UpperCamelCase ): UpperCamelCase_ = np.asarray(audio_values[i] )[ padding_mask[i][None, :] != self.feature_extractor.padding_value ] UpperCamelCase_ = sliced_audio.reshape(__UpperCamelCase , -1 ) return audio_values
261
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase_ = {'configuration_sew': ['SEW_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SEWConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ 'SEW_PRETRAINED_MODEL_ARCHIVE_LIST', 'SEWForCTC', 'SEWForSequenceClassification', 'SEWModel', 'SEWPreTrainedModel', ] if TYPE_CHECKING: from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_sew import ( SEW_PRETRAINED_MODEL_ARCHIVE_LIST, SEWForCTC, SEWForSequenceClassification, SEWModel, SEWPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
16
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) a__ : int = { 'configuration_layoutlmv3': [ 'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv3Config', 'LayoutLMv3OnnxConfig', ], 'processing_layoutlmv3': ['LayoutLMv3Processor'], 'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Dict = ['LayoutLMv3TokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Any = [ 'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST', 'LayoutLMv3ForQuestionAnswering', 'LayoutLMv3ForSequenceClassification', 'LayoutLMv3ForTokenClassification', 'LayoutLMv3Model', 'LayoutLMv3PreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : str = [ 'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFLayoutLMv3ForQuestionAnswering', 'TFLayoutLMv3ForSequenceClassification', 'TFLayoutLMv3ForTokenClassification', 'TFLayoutLMv3Model', 'TFLayoutLMv3PreTrainedModel', ] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : List[Any] = ['LayoutLMv3FeatureExtractor'] a__ : str = ['LayoutLMv3ImageProcessor'] if TYPE_CHECKING: from .configuration_layoutlmva import ( LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig, LayoutLMvaOnnxConfig, ) from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_layoutlmva import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, TFLayoutLMvaPreTrainedModel, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor from .image_processing_layoutlmva import LayoutLMvaImageProcessor else: import sys a__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
349
0
'''simple docstring''' import argparse import dataclasses import json import logging import os import shutil from typing import List, Optional import datasets from accelerate import Accelerator from datasets import load_dataset from finetuning import finetune from tqdm.auto import tqdm import transformers from transformers import AutoConfig, set_seed from transformers.trainer_utils import IntervalStrategy UpperCAmelCase = logging.getLogger(__name__) UpperCAmelCase = 'pytorch_model.bin' @dataclasses.dataclass class __snake_case: '''simple docstring''' UpperCAmelCase : str = dataclasses.field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} ) UpperCAmelCase : Optional[str] = dataclasses.field( default=__snake_case , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."} , ) @dataclasses.dataclass class __snake_case: '''simple docstring''' UpperCAmelCase : str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."} ) UpperCAmelCase : str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."} ) UpperCAmelCase : Optional[str] = dataclasses.field( default=__snake_case , metadata={"help": "A csv or a json file containing the validation data."} ) UpperCAmelCase : Optional[str] = dataclasses.field( default=__snake_case , metadata={"help": "The name of the task to train on."} , ) UpperCAmelCase : Optional[List[str]] = dataclasses.field( default=__snake_case , metadata={"help": "The list of labels for the task."} ) @dataclasses.dataclass class __snake_case: '''simple docstring''' UpperCAmelCase : str = dataclasses.field( metadata={"help": "The output directory where the model predictions and checkpoints will be written."} ) UpperCAmelCase : Optional[str] = dataclasses.field( default="accuracy" , metadata={"help": "The evaluation metric used for the task."} ) UpperCAmelCase : Optional[str] = dataclasses.field( default="no" , metadata={ "help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]" } , ) UpperCAmelCase : Optional[int] = dataclasses.field( default=10 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , ) UpperCAmelCase : Optional[float] = dataclasses.field( default=0.0 , metadata={ "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions." 
} , ) UpperCAmelCase : Optional[bool] = dataclasses.field( default=__snake_case , metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."} , ) UpperCAmelCase : Optional[bool] = dataclasses.field( default=__snake_case , metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."} , ) UpperCAmelCase : Optional[bool] = dataclasses.field( default=__snake_case , metadata={"help": "Whether to fine-tune on labeled data after pseudo training."} , ) UpperCAmelCase : Optional[float] = dataclasses.field( default=0.0 , metadata={"help": "Confidence threshold for pseudo-labeled data filtering."} , ) UpperCAmelCase : Optional[int] = dataclasses.field( default=1_00 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , ) UpperCAmelCase : Optional[int] = dataclasses.field( default=__snake_case , metadata={"help": "Random seed for initialization."} , ) def _snake_case ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Tuple ) -> Union[str, Any]: """simple docstring""" lowerCAmelCase = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 ) if args.do_filter_by_confidence: lowerCAmelCase = dataset.filter(lambda _SCREAMING_SNAKE_CASE : example["probability"] > args.confidence_threshold ) if args.do_filter_by_val_performance: assert eval_result >= 0.0 and eval_result <= 1.0 lowerCAmelCase = int(eval_result * len(_a ) ) print(_a ) lowerCAmelCase = dataset.sort("""probability""" , reverse=_a ) lowerCAmelCase = dataset.select(range(_a ) ) lowerCAmelCase = dataset.remove_columns(["""label""", """probability"""] ) lowerCAmelCase = dataset.rename_column("""prediction""" , """label""" ) lowerCAmelCase = dataset.map(lambda _SCREAMING_SNAKE_CASE : {"label": idalabel[example["label"]]} ) lowerCAmelCase = dataset.shuffle(seed=args.seed ) lowerCAmelCase = os.path.join(_a , f'train_pseudo.{args.data_file_extension}' ) if args.data_file_extension == "csv": dataset.to_csv(_a , index=_a ) else: dataset.to_json(_a ) def _snake_case ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[Any] , **_SCREAMING_SNAKE_CASE : int ) -> Dict: """simple docstring""" lowerCAmelCase = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , ) logger.info(accelerator.state ) # Setup logging, we only want one process per machine to log things on the # screen. accelerator.is_local_main_process is only True for one process per # machine. 
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() lowerCAmelCase = STModelArguments(model_name_or_path=_a ) lowerCAmelCase = STDataArguments(train_file=_a , infer_file=_a ) lowerCAmelCase = STTrainingArguments(output_dir=_a ) lowerCAmelCase = argparse.Namespace() for arg_class in (model_args, data_args, training_args): for key, value in vars(_a ).items(): setattr(_a , _a , _a ) for key, value in kwargs.items(): if hasattr(_a , _a ): setattr(_a , _a , _a ) # Sanity checks lowerCAmelCase = {} lowerCAmelCase = None # You need to provide the training data and the data to predict on assert args.train_file is not None assert args.infer_file is not None lowerCAmelCase = args.train_file lowerCAmelCase = args.infer_file if args.evaluation_strategy != IntervalStrategy.NO.value: assert args.eval_file is not None lowerCAmelCase = args.eval_file for key in data_files: lowerCAmelCase = data_files[key].split(""".""" )[-1] assert extension in ["csv", "json"], f'`{key}_file` should be a csv or a json file.' if args.data_file_extension is None: lowerCAmelCase = extension else: assert extension == args.data_file_extension, f'`{key}_file` should be a {args.data_file_extension} file`.' assert ( args.eval_metric in datasets.list_metrics() ), f'{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.' # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed ) logger.info("""Creating the initial data directory for self-training...""" ) lowerCAmelCase = f'{args.output_dir}/self-train_iter-{{}}'.format lowerCAmelCase = data_dir_format(0 ) if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir , exist_ok=_a ) os.makedirs(_a , exist_ok=_a ) accelerator.wait_for_everyone() lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = 0 lowerCAmelCase = False # Show the progress bar lowerCAmelCase = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process ) # Self-train for iteration in range(0 , int(args.max_selftrain_iterations ) ): lowerCAmelCase = data_dir_format(_a ) assert os.path.exists(_a ) # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for # iteration > 0 lowerCAmelCase = os.path.join(_a , """stage-1""" ) lowerCAmelCase = { """accelerator""": accelerator, """model_name_or_path""": args.model_name_or_path, """cache_dir""": args.cache_dir, """do_train""": True, """train_file""": data_files["""train"""] if iteration == 0 else data_files["""train_pseudo"""], """do_eval""": True if args.eval_file is not None else False, """eval_file""": data_files["""eval"""], """do_predict""": True, """infer_file""": data_files["""infer"""], """task_name""": args.task_name, """label_list""": args.label_list, """output_dir""": current_output_dir, """eval_metric""": args.eval_metric, """evaluation_strategy""": args.evaluation_strategy, """early_stopping_patience""": args.early_stopping_patience, """early_stopping_threshold""": args.early_stopping_threshold, """seed""": args.seed, } # Add additional training arguments for key, value in kwargs.items(): if key not in arguments_dict and not hasattr(_a , _a ): arguments_dict.update({key: value} ) lowerCAmelCase = os.path.join(_a , """best-checkpoint""" , _a ) if 
os.path.exists(_a ): logger.info( """Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.""" , _a , _a , ) else: logger.info("""***** Running self-training: iteration: %d, stage: 1 *****""" , _a ) finetune(**_a ) accelerator.wait_for_everyone() assert os.path.exists(_a ) logger.info("""Self-training job completed: iteration: %d, stage: 1.""" , _a ) if iteration > 0 and args.finetune_on_labeled_data: # Stage 2 (optional): fine-tuning on the original labeled data lowerCAmelCase = os.path.join(_a , """best-checkpoint""" ) lowerCAmelCase = os.path.join(_a , """stage-2""" ) # Update arguments_dict lowerCAmelCase = model_path lowerCAmelCase = data_files["""train"""] lowerCAmelCase = current_output_dir lowerCAmelCase = os.path.join(_a , """best-checkpoint""" , _a ) if os.path.exists(_a ): logger.info( """Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.""" , _a , _a , ) else: logger.info("""***** Running self-training: iteration: %d, stage: 2 *****""" , _a ) finetune(**_a ) accelerator.wait_for_everyone() assert os.path.exists(_a ) logger.info("""Self-training job completed: iteration: %d, stage: 2.""" , _a ) lowerCAmelCase = iteration lowerCAmelCase = data_dir_format(iteration + 1 ) lowerCAmelCase = AutoConfig.from_pretrained(os.path.join(_a , """best-checkpoint""" ) ) lowerCAmelCase = config.idalabel lowerCAmelCase = os.path.join(_a , """eval_results_best-checkpoint.json""" ) lowerCAmelCase = os.path.join(_a , """test_results_best-checkpoint.json""" ) assert os.path.exists(_a ) with open(_a , """r""" ) as f: lowerCAmelCase = float(json.load(_a )[args.eval_metric] ) lowerCAmelCase = os.path.join(_a , """infer_output_best-checkpoint.csv""" ) assert os.path.exists(_a ) # Loading the dataset from local csv or json files. 
lowerCAmelCase = load_dataset(args.data_file_extension , data_files={"""data""": data_files["""infer"""]} )["""data"""] lowerCAmelCase = load_dataset("""csv""" , data_files={"""data""": infer_output_file} )["""data"""] if accelerator.is_main_process: os.makedirs(_a , exist_ok=_a ) shutil.copy(_a , os.path.join(_a , f'eval_results_iter-{iteration}.json' ) ) if os.path.exists(_a ): shutil.copy(_a , os.path.join(_a , f'test_results_iter-{iteration}.json' ) ) create_pseudo_labeled_data(_a , _a , _a , _a , _a , _a ) accelerator.wait_for_everyone() lowerCAmelCase = os.path.join(_a , f'train_pseudo.{args.data_file_extension}' ) if args.evaluation_strategy != IntervalStrategy.NO.value: lowerCAmelCase = eval_result if best_iteration is None: lowerCAmelCase = new_iteration lowerCAmelCase = new_eval_result else: if new_eval_result - best_eval_result > args.early_stopping_threshold: lowerCAmelCase = new_iteration lowerCAmelCase = new_eval_result lowerCAmelCase = 0 else: if new_eval_result == best_eval_result: lowerCAmelCase = new_iteration lowerCAmelCase = new_eval_result early_stopping_patience_counter += 1 if early_stopping_patience_counter >= args.early_stopping_patience: lowerCAmelCase = True progress_bar.update(1 ) if should_training_stop: break if best_iteration is not None: # Save the best iteration logger.info("""Best iteration: %d""" , _a ) logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , _a ) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(_a , f'eval_results_iter-{iteration}.json' ) , os.path.join(_a , """eval_results_best-iteration.json""" ) , ) else: # Assume that the last iteration is the best logger.info("""Best iteration: %d""" , args.max_selftrain_iterations - 1 ) logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , _a ) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(_a , f'eval_results_iter-{args.max_selftrain_iterations - 1}.json' ) , os.path.join(_a , """eval_results_best-iteration.json""" ) , )
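The pseudo-labeling step above keeps a prediction only when the model is confident enough, or when it ranks in the top fraction by probability. A small sketch of both filters with the `datasets` library; the column names mirror the script, and the toy data is hypothetical:

from datasets import Dataset

# Hypothetical model outputs on unlabeled data: predicted label + confidence.
preds = Dataset.from_dict({
    "prediction": [1, 0, 1, 0],
    "probability": [0.97, 0.55, 0.91, 0.62],
})

# Strategy 1: absolute confidence threshold, as in do_filter_by_confidence.
confident = preds.filter(lambda ex: ex["probability"] > 0.9)

# Strategy 2: keep the eval_result fraction with highest confidence,
# as in do_filter_by_val_performance (eval_result assumed to be 0.5 here).
num_selected = int(0.5 * len(preds))
top = preds.sort("probability", reverse=True).select(range(num_selected))

# Either subset is then renamed prediction -> label and used as new training data.
pseudo_train = confident.rename_column("prediction", "label")
print(len(confident), len(top), pseudo_train.column_names)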
365
'''simple docstring''' from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class __snake_case: '''simple docstring''' def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=32 , A_=2 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.0_2 , A_=3 , A_=4 , A_=None , ) -> Dict: lowerCAmelCase = parent lowerCAmelCase = 13 lowerCAmelCase = 7 lowerCAmelCase = True lowerCAmelCase = True lowerCAmelCase = True lowerCAmelCase = True lowerCAmelCase = 99 lowerCAmelCase = 384 lowerCAmelCase = 2 lowerCAmelCase = 4 lowerCAmelCase = 37 lowerCAmelCase = """gelu""" lowerCAmelCase = 0.1 lowerCAmelCase = 0.1 lowerCAmelCase = 512 lowerCAmelCase = 16 lowerCAmelCase = 2 lowerCAmelCase = 0.0_2 lowerCAmelCase = 3 lowerCAmelCase = 4 lowerCAmelCase = 128 lowerCAmelCase = 2 lowerCAmelCase = 9 lowerCAmelCase = 1 lowerCAmelCase = None def __snake_case ( self ) -> Optional[int]: lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase = None if self.use_input_mask: lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase = None if self.use_token_type_ids: lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = None if self.use_labels: lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=A_ , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __snake_case ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> int: lowerCAmelCase = TFConvBertModel(config=A_ ) lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} lowerCAmelCase = [input_ids, input_mask] lowerCAmelCase = model(A_ ) lowerCAmelCase = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __snake_case ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> List[Any]: lowerCAmelCase = TFConvBertForMaskedLM(config=A_ ) lowerCAmelCase = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } lowerCAmelCase = model(A_ ) self.parent.assertEqual(result.logits.shape , 
(self.batch_size, self.seq_length, self.vocab_size) ) def __snake_case ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Optional[int]: lowerCAmelCase = self.num_labels lowerCAmelCase = TFConvBertForSequenceClassification(config=A_ ) lowerCAmelCase = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } lowerCAmelCase = model(A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __snake_case ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Any: lowerCAmelCase = self.num_choices lowerCAmelCase = TFConvBertForMultipleChoice(config=A_ ) lowerCAmelCase = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) ) lowerCAmelCase = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) ) lowerCAmelCase = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) ) lowerCAmelCase = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } lowerCAmelCase = model(A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __snake_case ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Union[str, Any]: lowerCAmelCase = self.num_labels lowerCAmelCase = TFConvBertForTokenClassification(config=A_ ) lowerCAmelCase = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } lowerCAmelCase = model(A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __snake_case ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Optional[int]: lowerCAmelCase = TFConvBertForQuestionAnswering(config=A_ ) lowerCAmelCase = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } lowerCAmelCase = model(A_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __snake_case ( self ) -> Any: lowerCAmelCase = self.prepare_config_and_inputs() ( ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ) = config_and_inputs lowerCAmelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class __snake_case( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCAmelCase : Optional[int] = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) UpperCAmelCase : Union[str, Any] = ( { "feature-extraction": TFConvBertModel, "fill-mask": TFConvBertForMaskedLM, "question-answering": TFConvBertForQuestionAnswering, "text-classification": TFConvBertForSequenceClassification, "token-classification": TFConvBertForTokenClassification, "zero-shot": TFConvBertForSequenceClassification, } if is_tf_available() else {} ) UpperCAmelCase : Union[str, Any] = False UpperCAmelCase : Optional[int] = False UpperCAmelCase : Dict = False def __snake_case ( self ) -> Optional[int]: lowerCAmelCase = TFConvBertModelTester(self ) lowerCAmelCase = ConfigTester(self , config_class=A_ , hidden_size=37 ) def __snake_case ( self ) -> Tuple: self.config_tester.run_common_tests() def __snake_case ( self ) -> Union[str, Any]: lowerCAmelCase = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def __snake_case ( self ) -> Tuple: lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*A_ ) def __snake_case ( self ) -> Optional[int]: lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*A_ ) def __snake_case ( self ) -> List[str]: lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*A_ ) def __snake_case ( self ) -> str: lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*A_ ) def __snake_case ( self ) -> Tuple: lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A_ ) @slow def __snake_case ( self ) -> Any: lowerCAmelCase, lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase = True lowerCAmelCase = True if hasattr(A_ , """use_cache""" ): lowerCAmelCase = True lowerCAmelCase = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length ) lowerCAmelCase = getattr(self.model_tester , """key_length""" , A_ ) for model_class in self.all_model_classes: lowerCAmelCase = self._prepare_for_class(A_ , A_ ) lowerCAmelCase = model_class(A_ ) lowerCAmelCase = len(model(A_ ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(A_ , saved_model=A_ ) lowerCAmelCase = os.path.join(A_ , """saved_model""" , """1""" ) lowerCAmelCase = tf.keras.models.load_model(A_ ) lowerCAmelCase = model(A_ ) if self.is_encoder_decoder: lowerCAmelCase = outputs["""encoder_hidden_states"""] lowerCAmelCase = outputs["""encoder_attentions"""] else: lowerCAmelCase = outputs["""hidden_states"""] lowerCAmelCase = outputs["""attentions"""] self.assertEqual(len(A_ ) , A_ ) lowerCAmelCase = getattr( self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(A_ ) , A_ ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def __snake_case ( self ) -> Optional[Any]: lowerCAmelCase = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" ) self.assertIsNotNone(A_ ) def __snake_case ( self ) -> str: lowerCAmelCase, lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase = True lowerCAmelCase = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length ) lowerCAmelCase = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length ) lowerCAmelCase = getattr(self.model_tester , """key_length""" , A_ ) lowerCAmelCase = getattr(self.model_tester , """key_length""" , A_ ) def check_decoder_attentions_output(A_ ): lowerCAmelCase = len(A_ ) self.assertEqual(out_len % 2 , 0 ) lowerCAmelCase = outputs.decoder_attentions self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(A_ ): lowerCAmelCase = [ 
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: lowerCAmelCase = True lowerCAmelCase = False lowerCAmelCase = model_class(A_ ) lowerCAmelCase = model(self._prepare_for_class(A_ , A_ ) ) lowerCAmelCase = len(A_ ) self.assertEqual(config.output_hidden_states , A_ ) check_encoder_attentions_output(A_ ) if self.is_encoder_decoder: lowerCAmelCase = model_class(A_ ) lowerCAmelCase = model(self._prepare_for_class(A_ , A_ ) ) self.assertEqual(config.output_hidden_states , A_ ) check_decoder_attentions_output(A_ ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] lowerCAmelCase = True lowerCAmelCase = model_class(A_ ) lowerCAmelCase = model(self._prepare_for_class(A_ , A_ ) ) self.assertEqual(config.output_hidden_states , A_ ) check_encoder_attentions_output(A_ ) # Check attention is always last and order is fine lowerCAmelCase = True lowerCAmelCase = True lowerCAmelCase = model_class(A_ ) lowerCAmelCase = model(self._prepare_for_class(A_ , A_ ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(A_ ) ) self.assertEqual(model.config.output_hidden_states , A_ ) check_encoder_attentions_output(A_ ) @require_tf class __snake_case( unittest.TestCase ): '''simple docstring''' @slow def __snake_case ( self ) -> Any: lowerCAmelCase = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" ) lowerCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] ) lowerCAmelCase = model(A_ )[0] lowerCAmelCase = [1, 6, 768] self.assertEqual(output.shape , A_ ) lowerCAmelCase = tf.constant( [ [ [-0.0_3_4_7_5_4_9_3, -0.4_6_8_6_0_3_4, -0.3_0_6_3_8_8_3_2], [0.2_2_6_3_7_2_4_8, -0.2_6_9_8_8_6_4_6, -0.7_4_2_3_4_2_4], [0.1_0_3_2_4_8_6_8, -0.4_5_0_1_3_5_0_8, -0.5_8_2_8_0_7_8_4], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , A_ , atol=1e-4 )
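The attention-shape assertions above divide `num_attention_heads` by 2 because ConvBERT replaces a fraction of the self-attention heads with span-based convolution heads, so only the remainder appear in the returned attention maps. A tiny sketch of the expected-shape arithmetic, using the tester's values and ConvBERT's default head ratio:

# Expected per-layer attention tensor shape for ConvBERT-style mixed attention.
num_attention_heads = 4      # configured heads
head_ratio = 2               # ConvBertConfig's default head_ratio
seq_length = 7

# Only heads // head_ratio heads perform real self-attention; the rest are
# convolutional and produce no attention map, hence the "/ 2" in the test.
attention_heads_in_output = num_attention_heads // head_ratio
expected_shape = (attention_heads_in_output, seq_length, seq_length)
print(expected_shape)  # (2, 7, 7)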
187
0
def least_divisible_repunit(divisor: int) -> int:
    """Return the smallest k such that the repunit R(k) = 111...1 (k ones) is
    divisible by divisor, or 0 when divisor shares a factor with 10 and no
    repunit can ever be divisible by it."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        # R(k+1) mod divisor follows from R(k) mod divisor, so the repunit
        # itself never needs to be built in full.
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1000000) -> int:
    """Return the least odd divisor n (coprime to 10) whose A(n) exceeds limit."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor


if __name__ == "__main__":
    print(f"{solution() = }")
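A quick sanity check of `least_divisible_repunit` (Project Euler 129's A(n)), assuming the reconstructed names above are in scope: R(6) = 111111 is the first repunit divisible by 7, while divisors sharing a factor with 10 return 0.

# Hedged demo of the helper above.
assert least_divisible_repunit(7) == 6    # R(6) = 111111 = 7 * 15873
assert least_divisible_repunit(41) == 5   # R(5) = 11111 = 41 * 271
assert least_divisible_repunit(10) == 0   # gcd(10, 10) != 1, so no repunit works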
218
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device


torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
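The integration test above pins a 3x3 corner of the generated image against hard-coded values; this "golden slice" idiom catches numerical drift without storing whole reference images. A minimal hedged sketch of just the comparison, with placeholder data:

import numpy as np

# Hypothetical output batch: (batch, height, width, channels) in [0, 1].
image = np.zeros((1, 512, 512, 3), dtype=np.float32)

# Take a tiny deterministic window from the last channel near the center.
image_slice = image[0, 253:256, 253:256, -1]

# Reference values recorded once from a trusted run (all-zero here for the sketch).
expected_slice = np.zeros(9, dtype=np.float32)

# Elementwise max deviation; the tolerance absorbs GPU nondeterminism.
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2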
218
1
"""simple docstring""" import os import jsonlines import numpy as np from tqdm import tqdm _lowercase = 20_48 _lowercase = 40_96 _lowercase = 42 _lowercase = os.environ.pop('PROCESS_TRAIN', 'false') _lowercase = {'null': 0, 'short': 1, 'long': 2, 'yes': 3, 'no': 4} def lowercase__ ( snake_case_ :Tuple ): def choose_first(snake_case_ :Optional[int] , snake_case_ :Optional[int]=False ): assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) if len(UpperCamelCase__ ) == 1: __UpperCAmelCase = answer[0] return {k: [answer[k]] for k in answer} if is_long_answer else answer for a in answer: if is_long_answer: __UpperCAmelCase = {k: [a[k]] for k in a} if len(a['''start_token'''] ) > 0: break return a __UpperCAmelCase = {'''id''': example['''id''']} __UpperCAmelCase = example['''annotations'''] __UpperCAmelCase = annotation['''yes_no_answer'''] if 0 in yes_no_answer or 1 in yes_no_answer: __UpperCAmelCase = ['''yes'''] if 1 in yes_no_answer else ['''no'''] __UpperCAmelCase = __UpperCAmelCase = [] __UpperCAmelCase = __UpperCAmelCase = [] __UpperCAmelCase = ['''<cls>'''] else: __UpperCAmelCase = ['''short'''] __UpperCAmelCase = choose_first(annotation['''short_answers'''] ) if len(out['''start_token'''] ) == 0: # answer will be long if short is not available __UpperCAmelCase = ['''long'''] __UpperCAmelCase = choose_first(annotation['''long_answer'''] , is_long_answer=UpperCamelCase__ ) __UpperCAmelCase = [] answer.update(UpperCamelCase__ ) # disregard some samples if len(answer['''start_token'''] ) > 1 or answer["start_token"] == answer["end_token"]: __UpperCAmelCase = True else: __UpperCAmelCase = False __UpperCAmelCase = ['''start_token''', '''end_token''', '''start_byte''', '''end_byte''', '''text'''] if not all(isinstance(answer[k] , UpperCamelCase__ ) for k in cols ): raise ValueError('''Issue in ID''' , example['''id'''] ) return answer def lowercase__ ( snake_case_ :Optional[Any] , snake_case_ :int=False ): __UpperCAmelCase = _get_single_answer(UpperCamelCase__ ) # bytes are of no use del answer["start_byte"] del answer["end_byte"] # handle yes_no answers explicitly if answer["category"][0] in ["yes", "no"]: # category is list with one element __UpperCAmelCase = example['''document''']['''tokens'''] __UpperCAmelCase = [] for i in range(len(doc['''token'''] ) ): if not doc["is_html"][i]: context.append(doc['''token'''][i] ) return { "context": " ".join(UpperCamelCase__ ), "answer": { "start_token": -100, # ignore index in cross-entropy "end_token": -100, # ignore index in cross-entropy "category": answer["category"], "span": answer["category"], # extra }, } # later, help in removing all no answers if answer["start_token"] == [-1]: return { "context": "None", "answer": { "start_token": -1, "end_token": -1, "category": "null", "span": "None", # extra }, } # handling normal samples __UpperCAmelCase = ['''start_token''', '''end_token'''] answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. 
[10] == 10 __UpperCAmelCase = example['''document''']['''tokens'''] __UpperCAmelCase = answer['''start_token'''] __UpperCAmelCase = answer['''end_token'''] __UpperCAmelCase = [] for i in range(len(doc['''token'''] ) ): if not doc["is_html"][i]: context.append(doc['''token'''][i] ) else: if answer["start_token"] > i: start_token -= 1 if answer["end_token"] > i: end_token -= 1 __UpperCAmelCase = ''' '''.join(context[start_token:end_token] ) # checking above code if assertion: __UpperCAmelCase = doc['''is_html'''][answer['''start_token'''] : answer['''end_token''']] __UpperCAmelCase = doc['''token'''][answer['''start_token'''] : answer['''end_token''']] __UpperCAmelCase = ''' '''.join([old[i] for i in range(len(UpperCamelCase__ ) ) if not is_html[i]] ) if new != old: print('''ID:''' , example['''id'''] ) print('''New:''' , UpperCamelCase__ , end='''\n''' ) print('''Old:''' , UpperCamelCase__ , end='''\n\n''' ) return { "context": " ".join(UpperCamelCase__ ), "answer": { "start_token": start_token, "end_token": end_token - 1, # this makes it inclusive "category": answer["category"], # either long or short "span": new, # extra }, } def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Optional[int] , snake_case_ :str=2_048 , snake_case_ :List[Any]=4_096 , snake_case_ :List[Any]=True ): __UpperCAmelCase = get_context_and_ans(UpperCamelCase__ , assertion=UpperCamelCase__ ) __UpperCAmelCase = out['''answer'''] # later, removing these samples if answer["start_token"] == -1: return { "example_id": example["id"], "input_ids": [[-1]], "labels": { "start_token": [-1], "end_token": [-1], "category": ["null"], }, } __UpperCAmelCase = tokenizer(example['''question''']['''text'''] , out['''context'''] ).input_ids __UpperCAmelCase = input_ids.index(tokenizer.sep_token_id ) + 1 # return yes/no if answer["category"][0] in ["yes", "no"]: # category is list with one element __UpperCAmelCase = [] __UpperCAmelCase = [] __UpperCAmelCase = input_ids[:q_len] __UpperCAmelCase = range(UpperCamelCase__ , len(UpperCamelCase__ ) , max_length - doc_stride ) for i in doc_start_indices: __UpperCAmelCase = i + max_length - q_len __UpperCAmelCase = input_ids[i:end_index] inputs.append(q_indices + slice ) category.append(answer['''category'''][0] ) if slice[-1] == tokenizer.sep_token_id: break return { "example_id": example["id"], "input_ids": inputs, "labels": { "start_token": [-100] * len(UpperCamelCase__ ), "end_token": [-100] * len(UpperCamelCase__ ), "category": category, }, } __UpperCAmelCase = out['''context'''].split() __UpperCAmelCase = splitted_context[answer['''end_token''']] __UpperCAmelCase = len( tokenizer( ''' '''.join(splitted_context[: answer['''start_token''']] ) , add_special_tokens=UpperCamelCase__ , ).input_ids ) __UpperCAmelCase = len( tokenizer(''' '''.join(splitted_context[: answer['''end_token''']] ) , add_special_tokens=UpperCamelCase__ ).input_ids ) answer["start_token"] += q_len answer["end_token"] += q_len # fixing end token __UpperCAmelCase = len(tokenizer(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ).input_ids ) if num_sub_tokens > 1: answer["end_token"] += num_sub_tokens - 1 __UpperCAmelCase = input_ids[answer['''start_token'''] : answer['''end_token'''] + 1] # right & left are inclusive __UpperCAmelCase = answer['''start_token'''] __UpperCAmelCase = answer['''end_token'''] if assertion: __UpperCAmelCase = tokenizer.decode(UpperCamelCase__ ) if answer["span"] != new: print('''ISSUE IN TOKENIZATION''' ) print('''OLD:''' , answer['''span'''] ) print('''NEW:''' , UpperCamelCase__ , 
end='''\n\n''' ) if len(UpperCamelCase__ ) <= max_length: return { "example_id": example["id"], "input_ids": [input_ids], "labels": { "start_token": [answer["start_token"]], "end_token": [answer["end_token"]], "category": answer["category"], }, } __UpperCAmelCase = input_ids[:q_len] __UpperCAmelCase = range(UpperCamelCase__ , len(UpperCamelCase__ ) , max_length - doc_stride ) __UpperCAmelCase = [] __UpperCAmelCase = [] __UpperCAmelCase = [] __UpperCAmelCase = [] # null, yes, no, long, short for i in doc_start_indices: __UpperCAmelCase = i + max_length - q_len __UpperCAmelCase = input_ids[i:end_index] inputs.append(q_indices + slice ) assert len(inputs[-1] ) <= max_length, "Issue in truncating length" if start_token >= i and end_token <= end_index - 1: __UpperCAmelCase = start_token - i + q_len __UpperCAmelCase = end_token - i + q_len answers_category.append(answer['''category'''][0] ) # ["short"] -> "short" else: __UpperCAmelCase = -100 __UpperCAmelCase = -100 answers_category.append('''null''' ) __UpperCAmelCase = inputs[-1][start_token : end_token + 1] answers_start_token.append(UpperCamelCase__ ) answers_end_token.append(UpperCamelCase__ ) if assertion: if new != old and new != [tokenizer.cls_token_id]: print('''ISSUE in strided for ID:''' , example['''id'''] ) print('''New:''' , tokenizer.decode(UpperCamelCase__ ) ) print('''Old:''' , tokenizer.decode(UpperCamelCase__ ) , end='''\n\n''' ) if slice[-1] == tokenizer.sep_token_id: break return { "example_id": example["id"], "input_ids": inputs, "labels": { "start_token": answers_start_token, "end_token": answers_end_token, "category": answers_category, }, } def lowercase__ ( snake_case_ :Dict , snake_case_ :int , snake_case_ :Dict=2_048 , snake_case_ :Optional[int]=4_096 , snake_case_ :Tuple=False ): __UpperCAmelCase = get_strided_contexts_and_ans( UpperCamelCase__ , UpperCamelCase__ , doc_stride=UpperCamelCase__ , max_length=UpperCamelCase__ , assertion=UpperCamelCase__ , ) return example def lowercase__ ( snake_case_ :int , snake_case_ :Optional[int] ): with jsonlines.open(UpperCamelCase__ , '''a''' ) as writer: for example in tqdm(UpperCamelCase__ , total=len(UpperCamelCase__ ) , desc='''Saving samples ... ''' ): __UpperCAmelCase = example['''labels'''] for ids, start, end, cat in zip( example['''input_ids'''] , labels['''start_token'''] , labels['''end_token'''] , labels['''category'''] , ): if start == -1 and end == -1: continue # leave waste samples with no answer if cat == "null" and np.random.rand() < 0.6: continue # removing 50 % samples writer.write( { '''input_ids''': ids, '''start_token''': start, '''end_token''': end, '''category''': CATEGORY_MAPPING[cat], } ) if __name__ == "__main__": from datasets import load_dataset from transformers import BigBirdTokenizer _lowercase = load_dataset('natural_questions') _lowercase = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base') _lowercase = data['train' if PROCESS_TRAIN == 'true' else 'validation'] _lowercase = { 'tokenizer': tokenizer, 'doc_stride': DOC_STRIDE, 'max_length': MAX_LENGTH, 'assertion': False, } _lowercase = data.map(prepare_inputs, fn_kwargs=fn_kwargs) _lowercase = data.remove_columns(['annotations', 'document', 'id', 'question']) print(data) np.random.seed(SEED) _lowercase = 'nq-training.jsonl' if PROCESS_TRAIN == 'true' else 'nq-validation.jsonl' save_to_disk(data, file_name=cache_file_name)
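The strided-context logic above turns one long (question, context) encoding into overlapping windows: window starts advance by `max_length - doc_stride` tokens, and every window is re-prefixed with the question. A toy sketch of the index arithmetic with small numbers (illustrative values, not the script's 2048/4096 defaults):

# Toy sequence: pretend tokens 0..4 are the question, 5..29 the document.
input_ids = list(range(30))
q_len = 5          # question + separator length
max_length = 12    # window budget including the question prefix
doc_stride = 9     # controls the step between consecutive windows

q_indices = input_ids[:q_len]
chunks = []
for i in range(q_len, len(input_ids), max_length - doc_stride):
    end_index = i + max_length - q_len
    chunks.append(q_indices + input_ids[i:end_index])

# Each chunk holds the question plus max_length - q_len = 7 document tokens,
# and starts shift by max_length - doc_stride = 3, so windows overlap by 4.
print(len(chunks), chunks[0], chunks[1])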
363
"""simple docstring""" from __future__ import annotations def lowercase__ ( snake_case_ :float , snake_case_ :float , snake_case_ :float ): if days_between_payments <= 0: raise ValueError('''days_between_payments must be > 0''' ) if daily_interest_rate < 0: raise ValueError('''daily_interest_rate must be >= 0''' ) if principal <= 0: raise ValueError('''principal must be > 0''' ) return principal * daily_interest_rate * days_between_payments def lowercase__ ( snake_case_ :float , snake_case_ :float , snake_case_ :float , ): if number_of_compounding_periods <= 0: raise ValueError('''number_of_compounding_periods must be > 0''' ) if nominal_annual_interest_rate_percentage < 0: raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' ) if principal <= 0: raise ValueError('''principal must be > 0''' ) return principal * ( (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods - 1 ) def lowercase__ ( snake_case_ :float , snake_case_ :float , snake_case_ :float , ): if number_of_years <= 0: raise ValueError('''number_of_years must be > 0''' ) if nominal_annual_percentage_rate < 0: raise ValueError('''nominal_annual_percentage_rate must be >= 0''' ) if principal <= 0: raise ValueError('''principal must be > 0''' ) return compound_interest( snake_case_ , nominal_annual_percentage_rate / 365 , number_of_years * 365 ) if __name__ == "__main__": import doctest doctest.testmod()
86
0
import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = {"vocab_file": "spiece.model"} UpperCAmelCase__ = { "vocab_file": { "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model", } } UpperCAmelCase__ = { "AI-Sweden/gpt-sw3-126m": 2048, "AI-Sweden/gpt-sw3-350m": 2048, "AI-Sweden/gpt-sw3-1.6b": 2048, "AI-Sweden/gpt-sw3-6.7b": 2048, "AI-Sweden/gpt-sw3-20b": 2048, } class lowercase_ ( lowercase ): '''simple docstring''' __snake_case = VOCAB_FILES_NAMES __snake_case = PRETRAINED_VOCAB_FILES_MAP __snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __snake_case = ['''input_ids''', '''attention_mask'''] def __init__( self : List[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[int]=False , __UpperCAmelCase : List[str]=False , __UpperCAmelCase : Dict=False , __UpperCAmelCase : Dict=None , __UpperCAmelCase : int=None , __UpperCAmelCase : str=None , __UpperCAmelCase : Union[str, Any]=None , __UpperCAmelCase : Optional[Dict[str, Any]] = None , **__UpperCAmelCase : List[Any] , ) ->None: """simple docstring""" a = {} if sp_model_kwargs is None else sp_model_kwargs a = kwargs.get('''name_or_path''' ) if name_or_path is None: logger.warning( '''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,''' ''' you are testing the model, this can safely be ignored''' ) a = '''None''' # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing a = '''<|endoftext|>''' if eos_token is None else eos_token a = '''<unk>''' if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: a = unk_token if pad_token is None else pad_token a = eos_token if bos_token is None else bos_token else: a = '''<pad>''' if pad_token is None else pad_token a = '''<s>''' if bos_token is None else bos_token super().__init__( do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , ) a = do_lower_case a = remove_space a = keep_accents a = vocab_file a = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__UpperCAmelCase ) # Used for whitespace normalization in input texts # fmt : off a = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', '''„'''} # fmt : on # Regular expression to remove non-printing characters (e.g. 
some unicode control chars) in preprocessing a = re.compile( F"""[{''.join(map(__UpperCAmelCase , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8_203] ) )}]""" ) def __getstate__( self : int ) ->Any: """simple docstring""" a = self.__dict__.copy() a = None return state def __setstate__( self : int , __UpperCAmelCase : List[Any] ) ->Optional[int]: """simple docstring""" a = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): a = {} a = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def __lowerCAmelCase ( self : Optional[Any] ) ->int: """simple docstring""" return len(self.sp_model ) def __lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : str ) ->str: """simple docstring""" a = self.non_printing_characters_re.sub('''''' , __UpperCAmelCase ) # Normalize whitespaces a = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] ) # NFC Unicode normalization a = unicodedata.normalize('''NFC''' , __UpperCAmelCase ) return text def __lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : str , **__UpperCAmelCase : Tuple ) ->List[str]: """simple docstring""" a = self.preprocess_text(__UpperCAmelCase ) return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase ) def __lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : str ) ->int: """simple docstring""" return self.sp_model.PieceToId(__UpperCAmelCase ) def __lowerCAmelCase ( self : Any , __UpperCAmelCase : int ) ->str: """simple docstring""" return self.sp_model.IdToPiece(__UpperCAmelCase ) @staticmethod def __lowerCAmelCase ( __UpperCAmelCase : str ) ->str: """simple docstring""" return out_string def __lowerCAmelCase ( self : str , __UpperCAmelCase : List[str] ) ->str: """simple docstring""" a = [] a = '''''' a = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__UpperCAmelCase ) + token a = True a = [] else: current_sub_tokens.append(__UpperCAmelCase ) a = False out_string += self.sp_model.decode(__UpperCAmelCase ) return out_string def __lowerCAmelCase ( self : List[str] ) ->Dict[str, int]: """simple docstring""" a = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __lowerCAmelCase ( self : Dict , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ) ->Tuple[str]: """simple docstring""" if not os.path.isdir(__UpperCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return a = os.path.join( __UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCAmelCase , '''wb''' ) as fi: a = self.sp_model.serialized_model_proto() fi.write(__UpperCAmelCase ) return (out_vocab_file,) def __lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : Union[str, List[str]] , __UpperCAmelCase : Union[str, 
bool] = False ) ->Union[List[int], List[List[int]], "torch.Tensor"]: """simple docstring""" if isinstance(__UpperCAmelCase , __UpperCAmelCase ): a = self.preprocess_text(__UpperCAmelCase ) a = self.sp_model.encode(__UpperCAmelCase ) else: a = [self.preprocess_text(__UpperCAmelCase ) for t in text] a = self.sp_model.encode(__UpperCAmelCase ) if return_tensors is True or return_tensors == "pt": a = torch.tensor(__UpperCAmelCase ) return token_ids def __lowerCAmelCase ( self : Dict , __UpperCAmelCase : Union[int, List[int]] ) ->str: """simple docstring""" return self.sp_model.decode(__UpperCAmelCase ) def __lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : "Conversation" ) ->List[int]: """simple docstring""" a = [F"""User: {text}""" if is_user else F"""Bot: {text}""" for is_user, text in conversation.iter_texts()] a = ( F"""{self.eos_token}{self.bos_token}""" + F"""{self.bos_token}""".join(__UpperCAmelCase ) + F"""{self.bos_token}Bot:""" ) return self.encode(text=__UpperCAmelCase )
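The tokenizer's `preprocess_text` above does three things before SentencePiece sees the input: strip non-printing control characters, collapse exotic Unicode spaces to a plain space, and apply NFC normalization. A standalone hedged sketch of the same cleanup; the character sets here are abbreviated placeholders for the tokenizer's full tables:

import re
import unicodedata

# Abbreviated stand-ins for the tokenizer's whitespace and control tables.
WHITESPACES = {"\u00a0", "\u2009", "\u202f", "\u3000"}  # a few non-breaking/thin spaces
NON_PRINTING_RE = re.compile(
    "[" + "".join(map(chr, list(range(0, 9)) + list(range(11, 32)))) + "]"
)


def preprocess_text(text: str) -> str:
    text = NON_PRINTING_RE.sub("", text)                              # drop control chars
    text = "".join(" " if ch in WHITESPACES else ch for ch in text)   # normalize spaces
    return unicodedata.normalize("NFC", text)                         # canonical composition


print(preprocess_text("caf\u0065\u0301\u00a0au lait"))  # -> 'café au lait'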
0
"""simple docstring""" def _SCREAMING_SNAKE_CASE ( __snake_case : int = 4_00_00_00 ): '''simple docstring''' lowercase = [] lowercase , lowercase = 0, 1 while b <= n: if b % 2 == 0: even_fibs.append(__snake_case ) lowercase , lowercase = b, a + b return sum(__snake_case ) if __name__ == "__main__": print(F'''{solution() = }''')
220
0
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """Return the best total value using the items from `index` onward within max_weight."""
    if index == number_of_items:
        return 0
    # Option 1: skip the current item.
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    # Option 2: take the current item if it fits.
    ans2 = 0
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
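Usage and a complexity note for the recursion above: the plain version explores O(2^n) take/skip subsets, while caching on (index, remaining capacity) brings it to O(n * W). A hedged memoized variant, assuming the reconstructed signature:

from functools import lru_cache


def knapsack_memo(weights: list[int], values: list[int], max_weight: int) -> int:
    @lru_cache(maxsize=None)
    def best(index: int, remaining: int) -> int:
        if index == len(weights):
            return 0
        skip = best(index + 1, remaining)            # leave item behind
        take = 0
        if weights[index] <= remaining:              # take item if it fits
            take = values[index] + best(index + 1, remaining - weights[index])
        return max(skip, take)

    return best(0, max_weight)


# Items (weight, value): optimal picks are the 3- and 4-weight items -> value 9.
print(knapsack_memo([1, 3, 4], [1, 4, 5], 7))  # 9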
370
import copy from typing import Dict, List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING A_ :List[str] = { '''facebook/mask2former-swin-small-coco-instance''': ( '''https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json''' ) # See all Mask2Former models at https://huggingface.co/models?filter=mask2former } A_ :int = logging.get_logger(__name__) class __A ( a ): """simple docstring""" UpperCamelCase__ : Union[str, Any] ="""mask2former""" UpperCamelCase__ : Tuple =["""swin"""] UpperCamelCase__ : Dict ={"""hidden_size""": """hidden_dim"""} def __init__( self , lowerCamelCase__ = None , lowerCamelCase__ = 256 , lowerCamelCase__ = 256 , lowerCamelCase__ = 256 , lowerCamelCase__ = 1024 , lowerCamelCase__ = "relu" , lowerCamelCase__ = 6 , lowerCamelCase__ = 10 , lowerCamelCase__ = 8 , lowerCamelCase__ = 0.0 , lowerCamelCase__ = 2048 , lowerCamelCase__ = False , lowerCamelCase__ = False , lowerCamelCase__ = 4 , lowerCamelCase__ = 255 , lowerCamelCase__ = 100 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 2.0 , lowerCamelCase__ = 5.0 , lowerCamelCase__ = 5.0 , lowerCamelCase__ = 12544 , lowerCamelCase__ = 3.0 , lowerCamelCase__ = 0.75 , lowerCamelCase__ = 0.02 , lowerCamelCase__ = 1.0 , lowerCamelCase__ = True , lowerCamelCase__ = [4, 8, 16, 32] , lowerCamelCase__ = None , **lowerCamelCase__ , ): """simple docstring""" if backbone_config is None: logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' ) __UpperCamelCase : Optional[int] =CONFIG_MAPPING['swin']( image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=lowerCamelCase__ , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ): __UpperCamelCase : List[str] =backbone_config.pop('model_type' ) __UpperCamelCase : str =CONFIG_MAPPING[backbone_model_type] __UpperCamelCase : List[Any] =config_class.from_dict(lowerCamelCase__ ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. 
' f'Supported model types: {",".join(self.backbones_supported )}' ) __UpperCamelCase : Dict =backbone_config __UpperCamelCase : Optional[int] =feature_size __UpperCamelCase : Union[str, Any] =mask_feature_size __UpperCamelCase : Tuple =hidden_dim __UpperCamelCase : Optional[int] =encoder_feedforward_dim __UpperCamelCase : Optional[int] =activation_function __UpperCamelCase : Dict =encoder_layers __UpperCamelCase : List[Any] =decoder_layers __UpperCamelCase : int =num_attention_heads __UpperCamelCase : Optional[Any] =dropout __UpperCamelCase : int =dim_feedforward __UpperCamelCase : Any =pre_norm __UpperCamelCase : Union[str, Any] =enforce_input_projection __UpperCamelCase : str =common_stride __UpperCamelCase : List[str] =ignore_value __UpperCamelCase : Optional[int] =num_queries __UpperCamelCase : Any =no_object_weight __UpperCamelCase : int =class_weight __UpperCamelCase : str =mask_weight __UpperCamelCase : Dict =dice_weight __UpperCamelCase : str =train_num_points __UpperCamelCase : str =oversample_ratio __UpperCamelCase : int =importance_sample_ratio __UpperCamelCase : List[str] =init_std __UpperCamelCase : Union[str, Any] =init_xavier_std __UpperCamelCase : Any =use_auxiliary_loss __UpperCamelCase : Tuple =feature_strides __UpperCamelCase : Dict =output_auxiliary_logits __UpperCamelCase : Union[str, Any] =decoder_layers super().__init__(**lowerCamelCase__ ) @classmethod def __lowercase ( cls , lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" return cls( backbone_config=lowerCamelCase__ , **lowerCamelCase__ , ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Any =copy.deepcopy(self.__dict__ ) __UpperCamelCase : List[Any] =self.backbone_config.to_dict() __UpperCamelCase : Union[str, Any] =self.__class__.model_type return output
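The config class above nests a backbone config and flattens it through `to_dict` so the whole object stays JSON-serializable. A minimal hedged usage sketch; it assumes a transformers version that ships Mask2Former, and the printed values are the defaults described in the `__init__`:

from transformers import Mask2FormerConfig

# Default construction falls back to a Swin backbone, as the __init__ above logs.
config = Mask2FormerConfig()
print(config.backbone_config.model_type)  # 'swin'

# to_dict serializes the nested backbone config alongside the top-level fields,
# and from_dict restores it, including the backbone class dispatch.
as_dict = config.to_dict()
restored = Mask2FormerConfig.from_dict(as_dict)
assert restored.hidden_dim == config.hidden_dim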
245
0
'''simple docstring''' import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device A__: str = False class A__ ( unittest.TestCase ): pass @nightly @require_torch_gpu class A__ ( unittest.TestCase ): def __UpperCAmelCase ( self :Optional[Any] ) -> Tuple: '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self :Any ) -> Union[str, Any]: '''simple docstring''' _a : List[str] =VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) _a : Optional[int] =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" ) _a : Tuple =torch.manual_seed(0 ) _a : List[Any] =pipe.dual_guided( prompt="""first prompt""" , image=lowercase_ , text_to_image_strength=0.75 , generator=lowercase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowercase_ ) _a : List[str] =VersatileDiffusionPipeline.from_pretrained(lowercase_ , torch_dtype=torch.floataa ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) _a : Any =generator.manual_seed(0 ) _a : Optional[Any] =pipe.dual_guided( prompt="""first prompt""" , image=lowercase_ , text_to_image_strength=0.75 , generator=lowercase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass" def __UpperCAmelCase ( self :Union[str, Any] ) -> int: '''simple docstring''' _a : str =VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) _a : Tuple ="cyberpunk 2077" _a : str =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" ) _a : Any =torch.manual_seed(0 ) _a : Any =pipe.dual_guided( prompt=lowercase_ , image=lowercase_ , text_to_image_strength=0.75 , generator=lowercase_ , guidance_scale=7.5 , num_inference_steps=5_0 , output_type="""numpy""" , ).images _a : Optional[int] =image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) _a : Optional[int] =np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 _a : str ="A painting of a squirrel eating a burger " _a : Any =torch.manual_seed(0 ) _a : List[str] =pipe.text_to_image( prompt=lowercase_ , generator=lowercase_ , guidance_scale=7.5 , num_inference_steps=5_0 , output_type="""numpy""" ).images _a : Any =image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) _a : Optional[int] =np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 _a : Union[str, Any] =pipe.image_variation(lowercase_ , generator=lowercase_ , output_type="""numpy""" ).images _a : Optional[Any] =image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) _a : int =np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 
0.3_894, 0.4_297, 0.4_331, 0.4_456] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
276
import math from typing import Any, Callable, List, Optional, Tuple, Union import numpy as np import torch from ...models import TaFilmDecoder from ...schedulers import DDPMScheduler from ...utils import is_onnx_available, logging, randn_tensor if is_onnx_available(): from ..onnx_utils import OnnxRuntimeModel from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline from .continous_encoder import SpectrogramContEncoder from .notes_encoder import SpectrogramNotesEncoder UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name UpperCamelCase = 256 class snake_case_ ( __A ): __A : str = ["melgan"] def __init__( self : str , lowercase_ : SpectrogramNotesEncoder , lowercase_ : SpectrogramContEncoder , lowercase_ : TaFilmDecoder , lowercase_ : DDPMScheduler , lowercase_ : OnnxRuntimeModel if is_onnx_available() else Any , ) -> None: super().__init__() # From MELGAN lowercase__ : List[Any] = math.log(1E-5 ) # Matches MelGAN training. lowercase__ : str = 4.0 # Largest value for most examples lowercase__ : Any = 1_28 self.register_modules( notes_encoder=lowercase_ , continuous_encoder=lowercase_ , decoder=lowercase_ , scheduler=lowercase_ , melgan=lowercase_ , ) def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : List[str]=(-1.0, 1.0) , lowercase_ : Dict=False ) -> Optional[Any]: lowercase__ , lowercase__ : int = output_range if clip: lowercase__ : Optional[Any] = torch.clip(lowercase_ , self.min_value , self.max_value ) # Scale to [0, 1]. lowercase__ : List[str] = (features - self.min_value) / (self.max_value - self.min_value) # Scale to [min_out, max_out]. return zero_one * (max_out - min_out) + min_out def __UpperCamelCase ( self : Optional[int] , lowercase_ : List[str] , lowercase_ : List[str]=(-1.0, 1.0) , lowercase_ : List[Any]=False ) -> Union[str, Any]: lowercase__ , lowercase__ : Tuple = input_range lowercase__ : Optional[Any] = torch.clip(lowercase_ , lowercase_ , lowercase_ ) if clip else outputs # Scale to [0, 1]. lowercase__ : Union[str, Any] = (outputs - min_out) / (max_out - min_out) # Scale to [self.min_value, self.max_value]. 
return zero_one * (self.max_value - self.min_value) + self.min_value def __UpperCamelCase ( self : List[str] , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Tuple ) -> List[str]: lowercase__ : Optional[Any] = input_tokens > 0 lowercase__ , lowercase__ : int = self.notes_encoder( encoder_input_tokens=lowercase_ , encoder_inputs_mask=lowercase_ ) lowercase__ , lowercase__ : List[Any] = self.continuous_encoder( encoder_inputs=lowercase_ , encoder_inputs_mask=lowercase_ ) return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)] def __UpperCamelCase ( self : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : str ) -> Tuple: lowercase__ : Union[str, Any] = noise_time if not torch.is_tensor(lowercase_ ): lowercase__ : Optional[Any] = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device ) elif torch.is_tensor(lowercase_ ) and len(timesteps.shape ) == 0: lowercase__ : Optional[Any] = timesteps[None].to(input_tokens.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML lowercase__ : int = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device ) lowercase__ : str = self.decoder( encodings_and_masks=lowercase_ , decoder_input_tokens=lowercase_ , decoder_noise_time=lowercase_ ) return logits @torch.no_grad() def __call__( self : List[str] , lowercase_ : List[List[int]] , lowercase_ : Optional[torch.Generator] = None , lowercase_ : int = 1_00 , lowercase_ : bool = True , lowercase_ : str = "numpy" , lowercase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowercase_ : int = 1 , ) -> Union[AudioPipelineOutput, Tuple]: if (callback_steps is None) or ( callback_steps is not None and (not isinstance(lowercase_ , lowercase_ ) or callback_steps <= 0) ): raise ValueError( F'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' F''' {type(lowercase_ )}.''' ) lowercase__ : str = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa ) lowercase__ : Optional[int] = np.zeros([1, 0, self.n_dims] , np.floataa ) lowercase__ : str = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=lowercase_ , device=self.device ) for i, encoder_input_tokens in enumerate(lowercase_ ): if i == 0: lowercase__ : Union[str, Any] = torch.from_numpy(pred_mel[:1].copy() ).to( device=self.device , dtype=self.decoder.dtype ) # The first chunk has no previous context. lowercase__ : List[str] = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=lowercase_ , device=self.device ) else: # The full song pipeline does not feed in a context feature, so the mask # will be all 0s after the feature converter. Because we know we're # feeding in a full context chunk from the previous prediction, set it # to all 1s. 
lowercase__ : str = ones lowercase__ : str = self.scale_features( lowercase_ , output_range=[-1.0, 1.0] , clip=lowercase_ ) lowercase__ : str = self.encode( input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=lowercase_ , continuous_mask=lowercase_ , ) # Sample encoder_continuous_inputs shaped gaussian noise to begin loop lowercase__ : List[str] = randn_tensor( shape=encoder_continuous_inputs.shape , generator=lowercase_ , device=self.device , dtype=self.decoder.dtype , ) # set step values self.scheduler.set_timesteps(lowercase_ ) # Denoising diffusion loop for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): lowercase__ : Optional[int] = self.decode( encodings_and_masks=lowercase_ , input_tokens=lowercase_ , noise_time=t / self.scheduler.config.num_train_timesteps , ) # Compute previous output: x_t -> x_t-1 lowercase__ : Optional[Any] = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ ).prev_sample lowercase__ : Tuple = self.scale_to_features(lowercase_ , input_range=[-1.0, 1.0] ) lowercase__ : List[str] = mel[:1] lowercase__ : Optional[int] = mel.cpu().float().numpy() lowercase__ : str = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 ) # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(lowercase_ , lowercase_ ) logger.info("Generated segment" , lowercase_ ) if output_type == "numpy" and not is_onnx_available(): raise ValueError( "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'." ) elif output_type == "numpy" and self.melgan is None: raise ValueError( "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'." ) if output_type == "numpy": lowercase__ : Union[str, Any] = self.melgan(input_features=full_pred_mel.astype(np.floataa ) ) else: lowercase__ : Dict = full_pred_mel if not return_dict: return (output,) return AudioPipelineOutput(audios=lowercase_ )
87
0
"""simple docstring""" import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , ): __lowerCamelCase : List[str] = {} if train_file is not None: __lowerCamelCase : Tuple = [train_file] if eval_file is not None: __lowerCamelCase : List[str] = [eval_file] if test_file is not None: __lowerCamelCase : List[Any] = [test_file] __lowerCamelCase : List[Any] = datasets.load_dataset('csv' , data_files=__lowerCAmelCase ) __lowerCamelCase : Optional[int] = list(ds[list(files.keys() )[0]].features.keys() ) __lowerCamelCase : List[str] = features_name.pop(__lowerCAmelCase ) __lowerCamelCase : Optional[Any] = list(set(ds[list(files.keys() )[0]][label_name] ) ) __lowerCamelCase : Optional[Any] = {label: i for i, label in enumerate(__lowerCAmelCase )} __lowerCamelCase : int = tokenizer.model_input_names __lowerCamelCase : List[Any] = {} if len(__lowerCAmelCase ) == 1: for k in files.keys(): __lowerCamelCase : Optional[int] = ds[k].map( lambda SCREAMING_SNAKE_CASE__ : tokenizer.batch_encode_plus( example[features_name[0]] , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase , padding='max_length' ) , batched=__lowerCAmelCase , ) elif len(__lowerCAmelCase ) == 2: for k in files.keys(): __lowerCamelCase : Optional[Any] = ds[k].map( lambda SCREAMING_SNAKE_CASE__ : tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase , padding='max_length' , ) , batched=__lowerCAmelCase , ) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: __lowerCamelCase : int = {k: v for k, v in ex.items() if k in input_names} __lowerCamelCase : Union[str, Any] = labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: __lowerCamelCase : Optional[Any] = {k: v for k, v in ex.items() if k in input_names} __lowerCamelCase : List[str] = labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: __lowerCamelCase : List[Any] = {k: v for k, v in ex.items() if k in input_names} __lowerCamelCase : Any = labelaid[ex[label_name]] yield (d, label) __lowerCamelCase : List[Any] = ( tf.data.Dataset.from_generator( __lowerCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: __lowerCamelCase : str = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) ) __lowerCamelCase : Tuple = ( tf.data.Dataset.from_generator( __lowerCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: __lowerCamelCase : List[Any] = 
val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) ) __lowerCamelCase : List[Any] = ( tf.data.Dataset.from_generator( __lowerCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: __lowerCamelCase : Optional[int] = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) ) return train_ds, val_ds, test_ds, labelaid lowercase_ = logging.getLogger(__name__) @dataclass class A_ : '''simple docstring''' __snake_case = field(metadata={"""help""": """Which column contains the label"""} ) __snake_case = field(default=UpperCamelCase_ , metadata={"""help""": """The path of the training file"""} ) __snake_case = field(default=UpperCamelCase_ , metadata={"""help""": """The path of the development file"""} ) __snake_case = field(default=UpperCamelCase_ , metadata={"""help""": """The path of the test file"""} ) __snake_case = field( default=128 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) __snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) @dataclass class A_ : '''simple docstring''' __snake_case = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) __snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) __snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) __snake_case = field(default=UpperCamelCase_ , metadata={"""help""": """Set this flag to use fast tokenization."""} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. __snake_case = field( default=UpperCamelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) def UpperCamelCase__ ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __lowerCamelCase : Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) ) __lowerCamelCase : Optional[int] = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. Use' ' --overwrite_output_dir to overcome.' ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , ) logger.info( f'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, ' f'16-bits training: {training_args.fpaa}' ) logger.info(f'Training/evaluation parameters {training_args}' ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__lowerCamelCase : List[Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __lowerCamelCase : Union[str, Any] = get_tfds( train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=__lowerCAmelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , ) __lowerCamelCase : List[str] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(__lowerCAmelCase ) , labelaid=__lowerCAmelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='text-classification' , cache_dir=model_args.cache_dir , ) with training_args.strategy.scope(): __lowerCamelCase : Any = TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_pt=bool('.bin' in model_args.model_name_or_path ) , config=__lowerCAmelCase , cache_dir=model_args.cache_dir , ) def compute_metrics(SCREAMING_SNAKE_CASE__ ) -> Dict: __lowerCamelCase : Optional[int] = np.argmax(p.predictions , axis=1 ) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer __lowerCamelCase : str = TFTrainer( model=__lowerCAmelCase , args=__lowerCAmelCase , train_dataset=__lowerCAmelCase , eval_dataset=__lowerCAmelCase , compute_metrics=__lowerCAmelCase , ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir ) # Evaluation __lowerCamelCase : Union[str, Any] = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) __lowerCamelCase : Union[str, Any] = trainer.evaluate() __lowerCamelCase : Tuple = os.path.join(training_args.output_dir , 'eval_results.txt' ) with open(__lowerCAmelCase , 'w' ) as writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(f' {key} = {value}' ) writer.write(f'{key} = {value}\n' ) results.update(__lowerCAmelCase ) return results if __name__ == "__main__": main()
362
import copy import os from collections import OrderedDict from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { 'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json', 'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json', 'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json', } class A_ ( __UpperCamelCase ): '''simple docstring''' __snake_case = """owlvit_text_model""" def __init__( self: Optional[int] , a: Dict=4_9408 , a: Optional[Any]=512 , a: Dict=2048 , a: Optional[Any]=12 , a: Tuple=8 , a: Union[str, Any]=16 , a: str="quick_gelu" , a: List[Any]=1e-5 , a: Dict=0.0 , a: Optional[int]=0.0_2 , a: Dict=1.0 , a: Any=0 , a: Union[str, Any]=4_9406 , a: Any=4_9407 , **a: Dict , ): super().__init__(pad_token_id=a , bos_token_id=a , eos_token_id=a , **a ) __lowerCamelCase : List[Any] = vocab_size __lowerCamelCase : int = hidden_size __lowerCamelCase : Optional[Any] = intermediate_size __lowerCamelCase : List[Any] = num_hidden_layers __lowerCamelCase : Any = num_attention_heads __lowerCamelCase : Union[str, Any] = max_position_embeddings __lowerCamelCase : List[Any] = hidden_act __lowerCamelCase : List[str] = layer_norm_eps __lowerCamelCase : Tuple = attention_dropout __lowerCamelCase : Optional[int] = initializer_range __lowerCamelCase : Tuple = initializer_factor @classmethod def _snake_case ( cls: Dict , a: Union[str, os.PathLike] , **a: Optional[int] ): cls._set_token_in_kwargs(a ) __lowerCamelCase , __lowerCamelCase : Dict = cls.get_config_dict(a , **a ) # get the text config dict if we are loading from OwlViTConfig if config_dict.get('model_type' ) == "owlvit": __lowerCamelCase : Optional[Any] = config_dict['text_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(a , **a ) class A_ ( __UpperCamelCase ): '''simple docstring''' __snake_case = """owlvit_vision_model""" def __init__( self: int , a: Tuple=768 , a: int=3072 , a: List[str]=12 , a: Optional[Any]=12 , a: Optional[int]=3 , a: Optional[int]=768 , a: Optional[Any]=32 , a: Optional[int]="quick_gelu" , a: Union[str, Any]=1e-5 , a: Union[str, Any]=0.0 , a: Union[str, Any]=0.0_2 , a: int=1.0 , **a: Union[str, Any] , ): super().__init__(**a ) __lowerCamelCase : str = hidden_size __lowerCamelCase : Tuple = intermediate_size __lowerCamelCase : Dict = num_hidden_layers __lowerCamelCase : Optional[Any] = num_attention_heads __lowerCamelCase : int = num_channels __lowerCamelCase : Optional[Any] = image_size __lowerCamelCase : Tuple = patch_size __lowerCamelCase : List[str] = hidden_act __lowerCamelCase : Tuple = layer_norm_eps __lowerCamelCase : List[Any] = attention_dropout __lowerCamelCase : Tuple = initializer_range __lowerCamelCase : List[Any] = initializer_factor @classmethod def _snake_case ( cls: Optional[int] , a: Union[str, os.PathLike] , **a: int ): cls._set_token_in_kwargs(a ) __lowerCamelCase , __lowerCamelCase : Dict = cls.get_config_dict(a , **a ) # get the vision config dict if we are loading from OwlViTConfig if config_dict.get('model_type' ) == "owlvit": __lowerCamelCase : Any = config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(a , **a ) class A_ ( __UpperCamelCase ): '''simple docstring''' __snake_case = """owlvit""" __snake_case = True def __init__( self: Dict , a: int=None , a: str=None , a: Tuple=512 , a: Tuple=2.6_5_9_2 , a: int=True , **a: int , ): super().__init__(**a ) if text_config is None: __lowerCamelCase : List[str] = {} logger.info('text_config is None. Initializing the OwlViTTextConfig with default values.' ) if vision_config is None: __lowerCamelCase : str = {} logger.info('vision_config is None. initializing the OwlViTVisionConfig with default values.' ) __lowerCamelCase : List[Any] = OwlViTTextConfig(**a ) __lowerCamelCase : str = OwlViTVisionConfig(**a ) __lowerCamelCase : Union[str, Any] = projection_dim __lowerCamelCase : Tuple = logit_scale_init_value __lowerCamelCase : Dict = return_dict __lowerCamelCase : Tuple = 1.0 @classmethod def _snake_case ( cls: str , a: Union[str, os.PathLike] , **a: List[Any] ): cls._set_token_in_kwargs(a ) __lowerCamelCase , __lowerCamelCase : List[Any] = cls.get_config_dict(a , **a ) if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(a , **a ) @classmethod def _snake_case ( cls: Tuple , a: Dict , a: Dict , **a: str ): __lowerCamelCase : List[str] = {} __lowerCamelCase : List[str] = text_config __lowerCamelCase : Optional[int] = vision_config return cls.from_dict(a , **a ) def _snake_case ( self: Optional[int] ): __lowerCamelCase : Optional[Any] = copy.deepcopy(self.__dict__ ) __lowerCamelCase : List[Any] = self.text_config.to_dict() __lowerCamelCase : List[str] = self.vision_config.to_dict() __lowerCamelCase : Optional[Any] = self.__class__.model_type return output class A_ ( __UpperCamelCase ): '''simple docstring''' @property def _snake_case ( self: str ): return OrderedDict( [ ('input_ids', {0: 'batch', 1: 'sequence'}), ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ('attention_mask', {0: 'batch', 1: 'sequence'}), ] ) @property def _snake_case ( self: Dict ): return OrderedDict( [ ('logits_per_image', {0: 'batch'}), ('logits_per_text', {0: 'batch'}), ('text_embeds', {0: 'batch'}), ('image_embeds', {0: 'batch'}), ] ) @property def _snake_case ( self: int ): return 1e-4 def _snake_case ( self: Any , a: "ProcessorMixin" , a: int = -1 , a: int = -1 , a: Optional["TensorType"] = None , ): __lowerCamelCase : List[str] = super().generate_dummy_inputs( processor.tokenizer , batch_size=a , seq_length=a , framework=a ) __lowerCamelCase : int = super().generate_dummy_inputs( processor.image_processor , batch_size=a , framework=a ) return {**text_input_dict, **image_input_dict} @property def _snake_case ( self: int ): return 14
194
0
import os
import zipfile

import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links


def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the id of the last completed run of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the requested artifacts of the last completed daily CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # NOTE: the keyword `worflow_run_id` (sic) matches the parameter name of the imported helper.
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download the requested artifacts and return their decoded file contents."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
131
from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch lowerCamelCase = logging.get_logger(__name__) class _a ( _lowercase): _a : Optional[Any] = ['''pixel_values'''] def __init__( self : List[Any] , _SCREAMING_SNAKE_CASE : bool = True , _SCREAMING_SNAKE_CASE : Optional[Dict[str, int]] = None , _SCREAMING_SNAKE_CASE : PILImageResampling = PILImageResampling.BILINEAR , _SCREAMING_SNAKE_CASE : bool = True , _SCREAMING_SNAKE_CASE : Dict[str, int] = None , _SCREAMING_SNAKE_CASE : bool = True , _SCREAMING_SNAKE_CASE : Union[int, float] = 1 / 255 , _SCREAMING_SNAKE_CASE : bool = True , _SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , _SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , **_SCREAMING_SNAKE_CASE : int , )-> None: super().__init__(**_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ : Dict = size if size is not None else {'''shortest_edge''': 256} lowerCAmelCase__ : Tuple = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ : List[Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} lowerCAmelCase__ : Optional[Any] = get_size_dict(_SCREAMING_SNAKE_CASE , param_name='''crop_size''' ) lowerCAmelCase__ : List[str] = do_resize lowerCAmelCase__ : Optional[Any] = size lowerCAmelCase__ : Any = resample lowerCAmelCase__ : str = do_center_crop lowerCAmelCase__ : Dict = crop_size lowerCAmelCase__ : str = do_rescale lowerCAmelCase__ : List[str] = rescale_factor lowerCAmelCase__ : int = do_normalize lowerCAmelCase__ : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowerCAmelCase__ : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD def UpperCAmelCase__( self : List[Any] , _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : Dict[str, int] , _SCREAMING_SNAKE_CASE : PILImageResampling = PILImageResampling.BICUBIC , _SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **_SCREAMING_SNAKE_CASE : Dict , )-> np.ndarray: lowerCAmelCase__ : str = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE ) if "shortest_edge" not in size: raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. 
Got {size.keys()}' ) lowerCAmelCase__ : List[str] = get_resize_output_image_size(_SCREAMING_SNAKE_CASE , size=size['''shortest_edge'''] , default_to_square=_SCREAMING_SNAKE_CASE ) return resize(_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , resample=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__( self : List[str] , _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : Dict[str, int] , _SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **_SCREAMING_SNAKE_CASE : List[str] , )-> np.ndarray: lowerCAmelCase__ : Dict = get_size_dict(_SCREAMING_SNAKE_CASE ) if "height" not in size or "width" not in size: raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' ) return center_crop(_SCREAMING_SNAKE_CASE , size=(size['''height'''], size['''width''']) , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__( self : int , _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **_SCREAMING_SNAKE_CASE : Optional[int] )-> np.ndarray: return rescale(_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__( self : Optional[int] , _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : Union[float, List[float]] , _SCREAMING_SNAKE_CASE : Union[float, List[float]] , _SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **_SCREAMING_SNAKE_CASE : str , )-> np.ndarray: return normalize(_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__( self : Any , _SCREAMING_SNAKE_CASE : ImageInput , _SCREAMING_SNAKE_CASE : Optional[bool] = None , _SCREAMING_SNAKE_CASE : Dict[str, int] = None , _SCREAMING_SNAKE_CASE : PILImageResampling = None , _SCREAMING_SNAKE_CASE : bool = None , _SCREAMING_SNAKE_CASE : Dict[str, int] = None , _SCREAMING_SNAKE_CASE : Optional[bool] = None , _SCREAMING_SNAKE_CASE : Optional[float] = None , _SCREAMING_SNAKE_CASE : Optional[bool] = None , _SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , _SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , _SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , _SCREAMING_SNAKE_CASE : Union[str, ChannelDimension] = ChannelDimension.FIRST , **_SCREAMING_SNAKE_CASE : Tuple , )-> Optional[Any]: lowerCAmelCase__ : List[str] = do_resize if do_resize is not None else self.do_resize lowerCAmelCase__ : List[str] = size if size is not None else self.size lowerCAmelCase__ : Any = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ : str = resample if resample is not None else self.resample lowerCAmelCase__ : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop lowerCAmelCase__ : Dict = crop_size if crop_size is not None else self.crop_size lowerCAmelCase__ : Any = get_size_dict(_SCREAMING_SNAKE_CASE , param_name='''crop_size''' ) lowerCAmelCase__ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale lowerCAmelCase__ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCAmelCase__ : str = do_normalize if do_normalize is not None else self.do_normalize lowerCAmelCase__ : List[Any] = image_mean if image_mean is not None else 
self.image_mean lowerCAmelCase__ : List[str] = image_std if image_std is not None else self.image_std lowerCAmelCase__ : Optional[int] = make_list_of_images(_SCREAMING_SNAKE_CASE ) if not valid_images(_SCREAMING_SNAKE_CASE ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. lowerCAmelCase__ : List[Any] = [to_numpy_array(_SCREAMING_SNAKE_CASE ) for image in images] if do_resize: lowerCAmelCase__ : Dict = [self.resize(image=_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , resample=_SCREAMING_SNAKE_CASE ) for image in images] if do_center_crop: lowerCAmelCase__ : Dict = [self.center_crop(image=_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE ) for image in images] if do_rescale: lowerCAmelCase__ : List[Any] = [self.rescale(image=_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE ) for image in images] if do_normalize: lowerCAmelCase__ : Tuple = [self.normalize(image=_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE ) for image in images] lowerCAmelCase__ : Dict = [to_channel_dimension_format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images] lowerCAmelCase__ : Dict = {'''pixel_values''': images} return BatchFeature(data=_SCREAMING_SNAKE_CASE , tensor_type=_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__( self : int , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[Tuple] = None )-> List[Any]: lowerCAmelCase__ : Union[str, Any] = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ): raise ValueError( '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' ) if is_torch_tensor(_SCREAMING_SNAKE_CASE ): lowerCAmelCase__ : Tuple = target_sizes.numpy() lowerCAmelCase__ : Tuple = [] for idx in range(len(_SCREAMING_SNAKE_CASE ) ): lowerCAmelCase__ : int = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ : str = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(_SCREAMING_SNAKE_CASE ) else: lowerCAmelCase__ : Any = logits.argmax(dim=1 ) lowerCAmelCase__ : Dict = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
131
1
"""simple docstring""" _lowerCAmelCase :Any = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n' _lowerCAmelCase :List[Any] = [{'type': 'code', 'content': INSTALL_CONTENT}] _lowerCAmelCase :Union[str, Any] = { '{processor_class}': 'FakeProcessorClass', '{model_class}': 'FakeModelClass', '{object_class}': 'FakeObjectClass', }
356
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCAmelCase :int = logging.get_logger(__name__) _lowerCAmelCase :Union[str, Any] = { 'xlm-mlm-en-2048': 'https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json', 'xlm-mlm-ende-1024': 'https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json', 'xlm-mlm-enfr-1024': 'https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json', 'xlm-mlm-enro-1024': 'https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json', 'xlm-mlm-tlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json', 'xlm-mlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json', 'xlm-clm-enfr-1024': 'https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json', 'xlm-clm-ende-1024': 'https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json', 'xlm-mlm-17-1280': 'https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json', 'xlm-mlm-100-1280': 'https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json', } class _UpperCAmelCase ( a ): '''simple docstring''' a__ ='''xlm''' a__ ={ '''hidden_size''': '''emb_dim''', '''num_attention_heads''': '''n_heads''', '''num_hidden_layers''': '''n_layers''', '''n_words''': '''vocab_size''', # For backward compatibility } def __init__( self , A=3_0_1_4_5 , A=2_0_4_8 , A=1_2 , A=1_6 , A=0.1 , A=0.1 , A=True , A=False , A=False , A=False , A=1 , A=True , A=5_1_2 , A=2_0_4_8**-0.5 , A=1E-12 , A=0.02 , A=0 , A=1 , A=2 , A=3 , A=5 , A=True , A="first" , A=True , A=None , A=True , A=0.1 , A=5 , A=5 , A=0 , A=0 , A=2 , A=0 , **A , ) -> Tuple: _UpperCAmelCase : Dict = vocab_size _UpperCAmelCase : Tuple = emb_dim _UpperCAmelCase : Optional[Any] = n_layers _UpperCAmelCase : Optional[Any] = n_heads _UpperCAmelCase : Dict = dropout _UpperCAmelCase : int = attention_dropout _UpperCAmelCase : Optional[Any] = gelu_activation _UpperCAmelCase : str = sinusoidal_embeddings _UpperCAmelCase : Any = causal _UpperCAmelCase : Optional[int] = asm _UpperCAmelCase : List[str] = n_langs _UpperCAmelCase : int = use_lang_emb _UpperCAmelCase : Any = layer_norm_eps _UpperCAmelCase : Any = bos_index _UpperCAmelCase : Optional[Any] = eos_index _UpperCAmelCase : List[str] = pad_index _UpperCAmelCase : Optional[int] = unk_index _UpperCAmelCase : Dict = mask_index _UpperCAmelCase : Any = is_encoder _UpperCAmelCase : Optional[Any] = max_position_embeddings _UpperCAmelCase : List[Any] = embed_init_std _UpperCAmelCase : Union[str, Any] = init_std _UpperCAmelCase : List[str] = summary_type _UpperCAmelCase : Dict = summary_use_proj _UpperCAmelCase : str = summary_activation _UpperCAmelCase : Union[str, Any] = summary_proj_to_labels _UpperCAmelCase : Tuple = summary_first_dropout _UpperCAmelCase : List[str] = start_n_top _UpperCAmelCase : Tuple = end_n_top _UpperCAmelCase : List[str] = mask_token_id _UpperCAmelCase : Optional[int] = lang_id if "n_words" in kwargs: _UpperCAmelCase : Tuple = kwargs['''n_words'''] super().__init__(pad_token_id=A , bos_token_id=A , **A ) class _UpperCAmelCase ( a ): '''simple docstring''' @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _UpperCAmelCase : Tuple = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: _UpperCAmelCase : Dict = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), 
('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis), ] )
68
0
"""simple docstring""" def snake_case_ ( A_ : list[int], A_ : list[int] ): '''simple docstring''' if not len(A_ ) == len(A_ ) == 3: raise ValueError('''Please enter a valid equation.''' ) if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0: raise ValueError('''Both a & b of two equations can\'t be zero.''' ) # Extract the coefficients _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = equationa _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = equationa # Calculate the determinants of the matrices _lowerCamelCase : Any = aa * ba - aa * ba _lowerCamelCase : str = ca * ba - ca * ba _lowerCamelCase : Any = aa * ca - aa * ca # Check if the system of linear equations has a solution (using Cramer's rule) if determinant == 0: if determinant_x == determinant_y == 0: raise ValueError('''Infinite solutions. (Consistent system)''' ) else: raise ValueError('''No solution. (Inconsistent system)''' ) else: if determinant_x == determinant_y == 0: # Trivial solution (Inconsistent system) return (0.0, 0.0) else: _lowerCamelCase : str = determinant_x / determinant _lowerCamelCase : Optional[int] = determinant_y / determinant # Non-Trivial Solution (Consistent system) return (x, y)
72
"""simple docstring""" import argparse import torch from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert from transformers.utils import logging logging.set_verbosity_info() def snake_case_ ( A_ : Tuple, A_ : int, A_ : Dict ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = LxmertConfig.from_json_file(A_ ) print(F'''Building PyTorch model from configuration: {config}''' ) _lowerCamelCase : List[str] = LxmertForPreTraining(A_ ) # Load weights from tf checkpoint load_tf_weights_in_lxmert(A_, A_, A_ ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict(), A_ ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) lowerCAmelCase__ = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
72
1
"""simple docstring""" from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_mobilenet_va import MobileNetVaConfig _a = logging.get_logger(__name__) # General docstring _a = 'MobileNetV1Config' # Base docstring _a = 'google/mobilenet_v1_1.0_224' _a = [1, 10_24, 7, 7] # Image classification docstring _a = 'google/mobilenet_v1_1.0_224' _a = 'tabby, tabby cat' _a = [ 'google/mobilenet_v1_1.0_224', 'google/mobilenet_v1_0.75_192', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 ] def _A ( UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : int, UpperCamelCase_ : Optional[int]=None) -> int: '''simple docstring''' __lowercase = {} if isinstance(UpperCamelCase_, UpperCamelCase_): __lowercase = model.mobilenet_va else: __lowercase = model __lowercase = "MobilenetV1/Conv2d_0/" __lowercase = backbone.conv_stem.convolution.weight __lowercase = backbone.conv_stem.normalization.bias __lowercase = backbone.conv_stem.normalization.weight __lowercase = backbone.conv_stem.normalization.running_mean __lowercase = backbone.conv_stem.normalization.running_var for i in range(13): __lowercase = i + 1 __lowercase = i * 2 __lowercase = backbone.layer[pt_index] __lowercase = F"""MobilenetV1/Conv2d_{tf_index}_depthwise/""" __lowercase = pointer.convolution.weight __lowercase = pointer.normalization.bias __lowercase = pointer.normalization.weight __lowercase = pointer.normalization.running_mean __lowercase = pointer.normalization.running_var __lowercase = backbone.layer[pt_index + 1] __lowercase = F"""MobilenetV1/Conv2d_{tf_index}_pointwise/""" __lowercase = pointer.convolution.weight __lowercase = pointer.normalization.bias __lowercase = pointer.normalization.weight __lowercase = pointer.normalization.running_mean __lowercase = pointer.normalization.running_var if isinstance(UpperCamelCase_, UpperCamelCase_): __lowercase = "MobilenetV1/Logits/Conv2d_1c_1x1/" __lowercase = model.classifier.weight __lowercase = model.classifier.bias return tf_to_pt_map def _A ( UpperCamelCase_ : int, UpperCamelCase_ : int, UpperCamelCase_ : List[str]) -> List[str]: '''simple docstring''' try: import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. 
Please see " "https://www.tensorflow.org/install/ for installation instructions.") raise # Load weights from TF model __lowercase = tf.train.list_variables(UpperCamelCase_) __lowercase = {} for name, shape in init_vars: logger.info(F"""Loading TF weight {name} with shape {shape}""") __lowercase = tf.train.load_variable(UpperCamelCase_, UpperCamelCase_) __lowercase = array # Build TF to PyTorch weights loading map __lowercase = _build_tf_to_pytorch_map(UpperCamelCase_, UpperCamelCase_, UpperCamelCase_) for name, pointer in tf_to_pt_map.items(): logger.info(F"""Importing {name}""") if name not in tf_weights: logger.info(F"""{name} not in tf pre-trained weights, skipping""") continue __lowercase = tf_weights[name] if "depthwise_weights" in name: logger.info("Transposing depthwise") __lowercase = np.transpose(UpperCamelCase_, (2, 3, 0, 1)) elif "weights" in name: logger.info("Transposing") if len(pointer.shape) == 2: # copying into linear layer __lowercase = array.squeeze().transpose() else: __lowercase = np.transpose(UpperCamelCase_, (3, 2, 0, 1)) if pointer.shape != array.shape: raise ValueError(F"""Pointer shape {pointer.shape} and array shape {array.shape} mismatched""") logger.info(F"""Initialize PyTorch weight {name} {array.shape}""") __lowercase = torch.from_numpy(UpperCamelCase_) tf_weights.pop(UpperCamelCase_, UpperCamelCase_) tf_weights.pop(name + "/RMSProp", UpperCamelCase_) tf_weights.pop(name + "/RMSProp_1", UpperCamelCase_) tf_weights.pop(name + "/ExponentialMovingAverage", UpperCamelCase_) logger.info(F"""Weights not copied to PyTorch model: {", ".join(tf_weights.keys())}""") return model def _A ( UpperCamelCase_ : torch.Tensor, UpperCamelCase_ : nn.Convad) -> torch.Tensor: '''simple docstring''' __lowercase ,__lowercase = features.shape[-2:] __lowercase ,__lowercase = conv_layer.stride __lowercase ,__lowercase = conv_layer.kernel_size if in_height % stride_height == 0: __lowercase = max(kernel_height - stride_height, 0) else: __lowercase = max(kernel_height - (in_height % stride_height), 0) if in_width % stride_width == 0: __lowercase = max(kernel_width - stride_width, 0) else: __lowercase = max(kernel_width - (in_width % stride_width), 0) __lowercase = pad_along_width // 2 __lowercase = pad_along_width - pad_left __lowercase = pad_along_height // 2 __lowercase = pad_along_height - pad_top __lowercase = (pad_left, pad_right, pad_top, pad_bottom) return nn.functional.pad(UpperCamelCase_, UpperCamelCase_, "constant", 0.0) class _lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self : Optional[Any], UpperCAmelCase__ : MobileNetVaConfig, UpperCAmelCase__ : int, UpperCAmelCase__ : int, UpperCAmelCase__ : int, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : bool = False, UpperCAmelCase__ : Optional[bool] = True, UpperCAmelCase__ : Optional[bool or str] = True, ): super().__init__() __lowercase = config if in_channels % groups != 0: raise ValueError(F"""Input channels ({in_channels}) are not divisible by {groups} groups.""" ) if out_channels % groups != 0: raise ValueError(F"""Output channels ({out_channels}) are not divisible by {groups} groups.""" ) __lowercase = 0 if config.tf_padding else int((kernel_size - 1) / 2 ) __lowercase = nn.Convad( in_channels=UpperCAmelCase__, out_channels=UpperCAmelCase__, kernel_size=UpperCAmelCase__, stride=UpperCAmelCase__, padding=UpperCAmelCase__, groups=UpperCAmelCase__, bias=UpperCAmelCase__, padding_mode="zeros", ) if use_normalization: __lowercase = nn.BatchNormad( 
num_features=UpperCAmelCase__, eps=config.layer_norm_eps, momentum=0.9_997, affine=UpperCAmelCase__, track_running_stats=UpperCAmelCase__, ) else: __lowercase = None if use_activation: if isinstance(UpperCAmelCase__, UpperCAmelCase__ ): __lowercase = ACTaFN[use_activation] elif isinstance(config.hidden_act, UpperCAmelCase__ ): __lowercase = ACTaFN[config.hidden_act] else: __lowercase = config.hidden_act else: __lowercase = None def _lowercase ( self : Any, UpperCAmelCase__ : torch.Tensor ): if self.config.tf_padding: __lowercase = apply_tf_padding(UpperCAmelCase__, self.convolution ) __lowercase = self.convolution(UpperCAmelCase__ ) if self.normalization is not None: __lowercase = self.normalization(UpperCAmelCase__ ) if self.activation is not None: __lowercase = self.activation(UpperCAmelCase__ ) return features class _lowerCAmelCase ( lowercase ): """simple docstring""" __UpperCAmelCase : Optional[Any] = MobileNetVaConfig __UpperCAmelCase : Optional[Any] = load_tf_weights_in_mobilenet_va __UpperCAmelCase : Tuple = "mobilenet_v1" __UpperCAmelCase : str = "pixel_values" __UpperCAmelCase : Dict = False def _lowercase ( self : Optional[int], UpperCAmelCase__ : Union[nn.Linear, nn.Convad] ): if isinstance(UpperCAmelCase__, (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(UpperCAmelCase__, nn.BatchNormad ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) _a = r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' _a = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." 
,lowercase ,) class _lowerCAmelCase ( lowercase ): """simple docstring""" def __init__( self : Any, UpperCAmelCase__ : MobileNetVaConfig, UpperCAmelCase__ : bool = True ): super().__init__(UpperCAmelCase__ ) __lowercase = config __lowercase = 3_2 __lowercase = max(int(depth * config.depth_multiplier ), config.min_depth ) __lowercase = MobileNetVaConvLayer( UpperCAmelCase__, in_channels=config.num_channels, out_channels=UpperCAmelCase__, kernel_size=3, stride=2, ) __lowercase = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1] __lowercase = nn.ModuleList() for i in range(1_3 ): __lowercase = out_channels if strides[i] == 2 or i == 0: depth *= 2 __lowercase = max(int(depth * config.depth_multiplier ), config.min_depth ) self.layer.append( MobileNetVaConvLayer( UpperCAmelCase__, in_channels=UpperCAmelCase__, out_channels=UpperCAmelCase__, kernel_size=3, stride=strides[i], groups=UpperCAmelCase__, ) ) self.layer.append( MobileNetVaConvLayer( UpperCAmelCase__, in_channels=UpperCAmelCase__, out_channels=UpperCAmelCase__, kernel_size=1, ) ) __lowercase = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def _lowercase ( self : int, UpperCAmelCase__ : str ): raise NotImplementedError @add_start_docstrings_to_model_forward(UpperCAmelCase__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=UpperCAmelCase__, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def _lowercase ( self : Optional[int], UpperCAmelCase__ : Optional[torch.Tensor] = None, UpperCAmelCase__ : Optional[bool] = None, UpperCAmelCase__ : Optional[bool] = None, ): __lowercase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __lowercase = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values" ) __lowercase = self.conv_stem(UpperCAmelCase__ ) __lowercase = () if output_hidden_states else None for i, layer_module in enumerate(self.layer ): __lowercase = layer_module(UpperCAmelCase__ ) if output_hidden_states: __lowercase = all_hidden_states + (hidden_states,) __lowercase = hidden_states if self.pooler is not None: __lowercase = torch.flatten(self.pooler(UpperCAmelCase__ ), start_dim=1 ) else: __lowercase = None if not return_dict: return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None ) return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=UpperCAmelCase__, pooler_output=UpperCAmelCase__, hidden_states=UpperCAmelCase__, ) @add_start_docstrings( "\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " ,lowercase ,) class _lowerCAmelCase ( lowercase ): """simple docstring""" def __init__( self : Union[str, Any], UpperCAmelCase__ : MobileNetVaConfig ): super().__init__(UpperCAmelCase__ ) __lowercase = config.num_labels __lowercase = MobileNetVaModel(UpperCAmelCase__ ) __lowercase = self.mobilenet_va.layer[-1].convolution.out_channels # Classifier head __lowercase = nn.Dropout(config.classifier_dropout_prob, inplace=UpperCAmelCase__ ) __lowercase = nn.Linear(UpperCAmelCase__, config.num_labels ) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(UpperCAmelCase__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=UpperCAmelCase__, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) def _lowercase ( self : int, UpperCAmelCase__ : Optional[torch.Tensor] = None, UpperCAmelCase__ : Optional[bool] = None, UpperCAmelCase__ : Optional[torch.Tensor] = None, UpperCAmelCase__ : Optional[bool] = None, ): __lowercase = return_dict if return_dict is not None else self.config.use_return_dict __lowercase = self.mobilenet_va(UpperCAmelCase__, output_hidden_states=UpperCAmelCase__, return_dict=UpperCAmelCase__ ) __lowercase = outputs.pooler_output if return_dict else outputs[1] __lowercase = self.classifier(self.dropout(UpperCAmelCase__ ) ) __lowercase = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: __lowercase = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): __lowercase = "single_label_classification" else: __lowercase = "multi_label_classification" if self.config.problem_type == "regression": __lowercase = MSELoss() if self.num_labels == 1: __lowercase = loss_fct(logits.squeeze(), labels.squeeze() ) else: __lowercase = loss_fct(UpperCAmelCase__, UpperCAmelCase__ ) elif self.config.problem_type == "single_label_classification": __lowercase = CrossEntropyLoss() __lowercase = loss_fct(logits.view(-1, self.num_labels ), labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": __lowercase = BCEWithLogitsLoss() __lowercase = loss_fct(UpperCAmelCase__, UpperCAmelCase__ ) if not return_dict: __lowercase = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=UpperCAmelCase__, logits=UpperCAmelCase__, hidden_states=outputs.hidden_states, )
144
"""simple docstring""" import numpy # List of input, output pairs _a = ( ((5, 2, 3), 15), ((6, 5, 9), 25), ((11, 12, 13), 41), ((1, 1, 1), 8), ((11, 12, 13), 41), ) _a = (((5_15, 22, 13), 5_55), ((61, 35, 49), 1_50)) _a = [2, 4, 1, 5] _a = len(train_data) _a = 0.009 def _A ( UpperCamelCase_ : str, UpperCamelCase_ : List[Any]="train") -> Optional[Any]: '''simple docstring''' return calculate_hypothesis_value(UpperCamelCase_, UpperCamelCase_) - output( UpperCamelCase_, UpperCamelCase_) def _A ( UpperCamelCase_ : List[Any]) -> Union[str, Any]: '''simple docstring''' __lowercase = 0 for i in range(len(UpperCamelCase_) - 1): hyp_val += data_input_tuple[i] * parameter_vector[i + 1] hyp_val += parameter_vector[0] return hyp_val def _A ( UpperCamelCase_ : Dict, UpperCamelCase_ : Optional[int]) -> Dict: '''simple docstring''' if data_set == "train": return train_data[example_no][1] elif data_set == "test": return test_data[example_no][1] return None def _A ( UpperCamelCase_ : Dict, UpperCamelCase_ : List[str]) -> int: '''simple docstring''' if data_set == "train": return _hypothesis_value(train_data[example_no][0]) elif data_set == "test": return _hypothesis_value(test_data[example_no][0]) return None def _A ( UpperCamelCase_ : Any, UpperCamelCase_ : Tuple=m) -> int: '''simple docstring''' __lowercase = 0 for i in range(UpperCamelCase_): if index == -1: summation_value += _error(UpperCamelCase_) else: summation_value += _error(UpperCamelCase_) * train_data[i][0][index] return summation_value def _A ( UpperCamelCase_ : str) -> str: '''simple docstring''' __lowercase = summation_of_cost_derivative(UpperCamelCase_, UpperCamelCase_) / m return cost_derivative_value def _A ( ) -> List[str]: '''simple docstring''' global parameter_vector # Tune these values to set a tolerance value for predicted output __lowercase = 0.000_002 __lowercase = 0 __lowercase = 0 while True: j += 1 __lowercase = [0, 0, 0, 0] for i in range(0, len(UpperCamelCase_)): __lowercase = get_cost_derivative(i - 1) __lowercase = ( parameter_vector[i] - LEARNING_RATE * cost_derivative ) if numpy.allclose( UpperCamelCase_, UpperCamelCase_, atol=UpperCamelCase_, rtol=UpperCamelCase_, ): break __lowercase = temp_parameter_vector print(("Number of iterations:", j)) def _A ( ) -> int: '''simple docstring''' for i in range(len(UpperCamelCase_)): print(("Actual output value:", output(UpperCamelCase_, "test"))) print(("Hypothesis output:", calculate_hypothesis_value(UpperCamelCase_, "test"))) if __name__ == "__main__": run_gradient_descent() print('\nTesting gradient descent for a linear hypothesis function.\n') test_gradient_descent()
144
1
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.activations import gelu_new, gelu_python, get_activation


@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
0
"""simple docstring""" def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' if upper_limit < 0: raise ValueError("Limit for the Catalan sequence must be ≥ 0" ) __SCREAMING_SNAKE_CASE = [0] * (upper_limit + 1) # Base case: C(0) = C(1) = 1 __SCREAMING_SNAKE_CASE = 1 if upper_limit > 0: __SCREAMING_SNAKE_CASE = 1 # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i for i in range(2 , upper_limit + 1 ): for j in range(lowerCAmelCase_ ): catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1] return catalan_list if __name__ == "__main__": print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''') print('''\n*** Enter -1 at any time to quit ***''') print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''') try: while True: a__ : List[str] = int(input().strip()) if N < 0: print('''\n********* Goodbye!! ************''') break else: print(F"The Catalan numbers from 0 through {N} are:") print(catalan_numbers(N)) print('''Try another upper limit for the sequence: ''', end='''''') except (NameError, ValueError): print('''\n********* Invalid input, goodbye! ************\n''') import doctest doctest.testmod()
54
0
def lowerCamelCase_ ( lowerCAmelCase: str )-> str: return " ".join(input_str.split()[::-1] ) if __name__ == "__main__": import doctest doctest.testmod()
350
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class _lowerCAmelCase ( UpperCAmelCase_ ): '''simple docstring''' a_ : int ="""naver-clova-ix/donut-base-finetuned-docvqa""" a_ : Dict =( """This is a tool that answers a question about an document (pdf). It takes an input named `document` which """ """should be the document containing the information, as well as a `question` that is the question about the """ """document. It returns a text that contains the answer to the question.""" ) a_ : Optional[Any] ="""document_qa""" a_ : str =AutoProcessor a_ : Union[str, Any] =VisionEncoderDecoderModel a_ : List[Any] =["""image""", """text"""] a_ : List[Any] =["""text"""] def __init__( self : Any , *UpperCamelCase : List[Any] , **UpperCamelCase : List[Any] ): '''simple docstring''' if not is_vision_available(): raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.' ) super().__init__(*UpperCamelCase , **UpperCamelCase ) def UpperCamelCase_ ( self : Optional[int] , UpperCamelCase : "Image" , UpperCamelCase : str ): '''simple docstring''' _snake_case : Dict = '<s_docvqa><s_question>{user_input}</s_question><s_answer>' _snake_case : int = task_prompt.replace('{user_input}' , UpperCamelCase ) _snake_case : Union[str, Any] = self.pre_processor.tokenizer( UpperCamelCase , add_special_tokens=UpperCamelCase , return_tensors='pt' ).input_ids _snake_case : Dict = self.pre_processor(UpperCamelCase , return_tensors='pt' ).pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def UpperCamelCase_ ( self : Dict , UpperCamelCase : Optional[int] ): '''simple docstring''' return self.model.generate( inputs['pixel_values'].to(self.device ) , decoder_input_ids=inputs['decoder_input_ids'].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=UpperCamelCase , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=UpperCamelCase , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=UpperCamelCase , ).sequences def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : str ): '''simple docstring''' _snake_case : Optional[Any] = self.pre_processor.batch_decode(UpperCamelCase )[0] _snake_case : int = sequence.replace(self.pre_processor.tokenizer.eos_token , '' ) _snake_case : Optional[int] = sequence.replace(self.pre_processor.tokenizer.pad_token , '' ) _snake_case : int = re.sub(R'<.*?>' , '' , UpperCamelCase , count=1 ).strip() # remove first task start token _snake_case : str = self.pre_processor.tokenajson(UpperCamelCase ) return sequence["answer"]
260
0
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, is_batched, to_numpy_array, valid_images, ) from ...utils import TensorType, logging a :List[Any] = logging.get_logger(__name__) class __a (snake_case_): '''simple docstring''' _SCREAMING_SNAKE_CASE :Dict = ["""pixel_values"""] def __init__( self , _a = True , _a = None , _a = PILImageResampling.BICUBIC , _a = True , _a = True , _a = 1 / 255 , _a = None , _a = True , _a = None , _a = None , **_a , ) -> Tuple: """simple docstring""" super().__init__(**_a ) SCREAMING_SNAKE_CASE__ : Tuple = size if size is not None else {"""height""": 224, """width""": 224} SCREAMING_SNAKE_CASE__ : List[str] = get_size_dict(_a ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} SCREAMING_SNAKE_CASE__ : Optional[int] = get_size_dict(_a , default_to_square=_a , param_name="""crop_size""" ) SCREAMING_SNAKE_CASE__ : Tuple = do_resize SCREAMING_SNAKE_CASE__ : str = do_rescale SCREAMING_SNAKE_CASE__ : Tuple = do_normalize SCREAMING_SNAKE_CASE__ : str = do_center_crop SCREAMING_SNAKE_CASE__ : Optional[Any] = crop_size SCREAMING_SNAKE_CASE__ : Optional[int] = size SCREAMING_SNAKE_CASE__ : str = resample SCREAMING_SNAKE_CASE__ : Optional[int] = rescale_factor SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN SCREAMING_SNAKE_CASE__ : int = image_std if image_std is not None else IMAGENET_DEFAULT_STD def _a ( self , _a , _a , _a = PILImageResampling.BILINEAR , _a = None , **_a , ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = get_size_dict(_a ) if "shortest_edge" in size: SCREAMING_SNAKE_CASE__ : Tuple = get_resize_output_image_size(_a , size=size["""shortest_edge"""] , default_to_square=_a ) # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"]) elif "height" in size and "width" in size: SCREAMING_SNAKE_CASE__ : int = (size["""height"""], size["""width"""]) else: raise ValueError(f'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''' ) return resize(_a , size=_a , resample=_a , data_format=_a , **_a ) def _a ( self , _a , _a , _a = None , **_a , ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = get_size_dict(_a ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(_a , size=(size["""height"""], size["""width"""]) , data_format=_a , **_a ) def _a ( self , _a , _a , _a = None , **_a ) -> Union[str, Any]: """simple docstring""" return rescale(_a , scale=_a , data_format=_a , **_a ) def _a ( self , _a , _a , _a , _a = None , **_a , ) -> Optional[Any]: """simple docstring""" return normalize(_a , mean=_a , std=_a , data_format=_a , **_a ) def _a ( self , _a , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = do_resize if do_resize is not None else self.do_resize SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale SCREAMING_SNAKE_CASE__ : List[Any] = do_normalize if do_normalize is not None else self.do_normalize SCREAMING_SNAKE_CASE__ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop SCREAMING_SNAKE_CASE__ : Optional[int] = crop_size if crop_size is not None else self.crop_size SCREAMING_SNAKE_CASE__ : List[str] = get_size_dict(_a , param_name="""crop_size""" , default_to_square=_a ) SCREAMING_SNAKE_CASE__ : List[Any] = resample if resample is not None else self.resample SCREAMING_SNAKE_CASE__ : str = rescale_factor if rescale_factor is not None else self.rescale_factor SCREAMING_SNAKE_CASE__ : str = image_mean if image_mean is not None else self.image_mean SCREAMING_SNAKE_CASE__ : Tuple = image_std if image_std is not None else self.image_std SCREAMING_SNAKE_CASE__ : int = size if size is not None else self.size SCREAMING_SNAKE_CASE__ : Any = get_size_dict(_a ) if not is_batched(_a ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = [images] if not valid_images(_a ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) # All transformations expect numpy arrays. SCREAMING_SNAKE_CASE__ : Union[str, Any] = [to_numpy_array(_a ) for image in images] if do_resize: SCREAMING_SNAKE_CASE__ : Union[str, Any] = [self.resize(image=_a , size=_a , resample=_a ) for image in images] if do_center_crop: SCREAMING_SNAKE_CASE__ : Tuple = [self.center_crop(image=_a , size=_a ) for image in images] if do_rescale: SCREAMING_SNAKE_CASE__ : Any = [self.rescale(image=_a , scale=_a ) for image in images] if do_normalize: SCREAMING_SNAKE_CASE__ : List[Any] = [self.normalize(image=_a , mean=_a , std=_a ) for image in images] SCREAMING_SNAKE_CASE__ : Any = [to_channel_dimension_format(_a , _a ) for image in images] SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"""pixel_values""": images} return BatchFeature(data=_a , tensor_type=_a )
132
"""simple docstring""" import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def _lowerCamelCase( a , a , a , a , a=True , a="pt" ): __a = {"add_prefix_space": True} if isinstance(a , a ) and not line.startswith(" " ) else {} __a = padding_side return tokenizer( [line] , max_length=a , padding="max_length" if pad_to_max_length else None , truncation=a , return_tensors=a , add_special_tokens=a , **a , ) def _lowerCamelCase( a , a , a=None , ): __a = input_ids.ne(a ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class snake_case__ ( snake_case_ ): def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase="train" , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="" , ): super().__init__() __a = Path(lowerCamelCase ).joinpath(type_path + ".source" ) __a = Path(lowerCamelCase ).joinpath(type_path + ".target" ) __a = self.get_char_lens(self.src_file ) __a = max_source_length __a = max_target_length assert min(self.src_lens ) > 0, F"found empty line in {self.src_file}" __a = tokenizer __a = prefix if n_obs is not None: __a = self.src_lens[:n_obs] __a = src_lang __a = tgt_lang def __len__( self ): return len(self.src_lens ) def __getitem__( self , lowerCamelCase ): __a = index + 1 # linecache starts at 1 __a = self.prefix + linecache.getline(str(self.src_file ) , lowerCamelCase ).rstrip("\n" ) __a = linecache.getline(str(self.tgt_file ) , lowerCamelCase ).rstrip("\n" ) assert source_line, F"empty source line for index {index}" assert tgt_line, F"empty tgt line for index {index}" # Need to add eos token manually for T5 if isinstance(self.tokenizer , lowerCamelCase ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right __a = ( self.tokenizer.question_encoder if isinstance(self.tokenizer , lowerCamelCase ) else self.tokenizer ) __a = self.tokenizer.generator if isinstance(self.tokenizer , lowerCamelCase ) else self.tokenizer __a = encode_line(lowerCamelCase , lowerCamelCase , self.max_source_length , "right" ) __a = encode_line(lowerCamelCase , lowerCamelCase , self.max_target_length , "right" ) __a = source_inputs["input_ids"].squeeze() __a = target_inputs["input_ids"].squeeze() __a = source_inputs["attention_mask"].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def a__ ( lowerCamelCase ): return [len(lowerCamelCase ) for x in Path(lowerCamelCase ).open().readlines()] def a__ ( self , lowerCamelCase ): __a = torch.stack([x["input_ids"] for x in batch] ) __a = torch.stack([x["attention_mask"] for x in batch] ) __a = torch.stack([x["decoder_input_ids"] for x in batch] ) __a = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer , lowerCamelCase ) else self.tokenizer.pad_token_id ) __a = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer , lowerCamelCase ) else self.tokenizer.pad_token_id ) __a = trim_batch(lowerCamelCase , lowerCamelCase ) __a , __a = trim_batch(lowerCamelCase , lowerCamelCase , attention_mask=lowerCamelCase ) 
__a = { "input_ids": source_ids, "attention_mask": source_mask, "decoder_input_ids": y, } return batch SCREAMING_SNAKE_CASE__:Tuple = getLogger(__name__) def _lowerCamelCase( a ): return list(itertools.chain.from_iterable(a ) ) def _lowerCamelCase( a ): __a = get_git_info() save_json(a , os.path.join(a , "git_log.json" ) ) def _lowerCamelCase( a , a , a=4 , **a ): with open(a , "w" ) as f: json.dump(a , a , indent=a , **a ) def _lowerCamelCase( a ): with open(a ) as f: return json.load(a ) def _lowerCamelCase( ): __a = git.Repo(search_parent_directories=a ) __a = { "repo_id": str(a ), "repo_sha": str(repo.head.object.hexsha ), "repo_branch": str(repo.active_branch ), "hostname": str(socket.gethostname() ), } return repo_infos def _lowerCamelCase( a , a ): return list(map(a , a ) ) def _lowerCamelCase( a , a ): with open(a , "wb" ) as f: return pickle.dump(a , a ) def _lowerCamelCase( a ): def remove_articles(a ): return re.sub(R"\b(a|an|the)\b" , " " , a ) def white_space_fix(a ): return " ".join(text.split() ) def remove_punc(a ): __a = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(a ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(a ) ) ) ) def _lowerCamelCase( a , a ): __a = normalize_answer(a ).split() __a = normalize_answer(a ).split() __a = Counter(a ) & Counter(a ) __a = sum(common.values() ) if num_same == 0: return 0 __a = 1.0 * num_same / len(a ) __a = 1.0 * num_same / len(a ) __a = (2 * precision * recall) / (precision + recall) return fa def _lowerCamelCase( a , a ): return normalize_answer(a ) == normalize_answer(a ) def _lowerCamelCase( a , a ): assert len(a ) == len(a ) __a = 0 for hypo, pred in zip(a , a ): em += exact_match_score(a , a ) if len(a ) > 0: em /= len(a ) return {"em": em} def _lowerCamelCase( a ): return model_prefix.startswith("rag" ) def _lowerCamelCase( a , a , a ): __a = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead __a = "dropout_rate" for p in extra_params: if getattr(a , a , a ): if not hasattr(a , a ) and not hasattr(a , equivalent_param[p] ): logger.info("config doesn't have a `{}` attribute".format(a ) ) delattr(a , a ) continue __a = p if hasattr(a , a ) else equivalent_param[p] setattr(a , a , getattr(a , a ) ) delattr(a , a ) return hparams, config
261
0
import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all feature extractors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...feature_extraction_utils import FeatureExtractionMixin from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) __a = logging.get_logger(__name__) __a = OrderedDict( [ ("audio-spectrogram-transformer", "ASTFeatureExtractor"), ("beit", "BeitFeatureExtractor"), ("chinese_clip", "ChineseCLIPFeatureExtractor"), ("clap", "ClapFeatureExtractor"), ("clip", "CLIPFeatureExtractor"), ("clipseg", "ViTFeatureExtractor"), ("conditional_detr", "ConditionalDetrFeatureExtractor"), ("convnext", "ConvNextFeatureExtractor"), ("cvt", "ConvNextFeatureExtractor"), ("data2vec-audio", "Wav2Vec2FeatureExtractor"), ("data2vec-vision", "BeitFeatureExtractor"), ("deformable_detr", "DeformableDetrFeatureExtractor"), ("deit", "DeiTFeatureExtractor"), ("detr", "DetrFeatureExtractor"), ("dinat", "ViTFeatureExtractor"), ("donut-swin", "DonutFeatureExtractor"), ("dpt", "DPTFeatureExtractor"), ("encodec", "EncodecFeatureExtractor"), ("flava", "FlavaFeatureExtractor"), ("glpn", "GLPNFeatureExtractor"), ("groupvit", "CLIPFeatureExtractor"), ("hubert", "Wav2Vec2FeatureExtractor"), ("imagegpt", "ImageGPTFeatureExtractor"), ("layoutlmv2", "LayoutLMv2FeatureExtractor"), ("layoutlmv3", "LayoutLMv3FeatureExtractor"), ("levit", "LevitFeatureExtractor"), ("maskformer", "MaskFormerFeatureExtractor"), ("mctct", "MCTCTFeatureExtractor"), ("mobilenet_v1", "MobileNetV1FeatureExtractor"), ("mobilenet_v2", "MobileNetV2FeatureExtractor"), ("mobilevit", "MobileViTFeatureExtractor"), ("nat", "ViTFeatureExtractor"), ("owlvit", "OwlViTFeatureExtractor"), ("perceiver", "PerceiverFeatureExtractor"), ("poolformer", "PoolFormerFeatureExtractor"), ("regnet", "ConvNextFeatureExtractor"), ("resnet", "ConvNextFeatureExtractor"), ("segformer", "SegformerFeatureExtractor"), ("sew", "Wav2Vec2FeatureExtractor"), ("sew-d", "Wav2Vec2FeatureExtractor"), ("speech_to_text", "Speech2TextFeatureExtractor"), ("speecht5", "SpeechT5FeatureExtractor"), ("swiftformer", "ViTFeatureExtractor"), ("swin", "ViTFeatureExtractor"), ("swinv2", "ViTFeatureExtractor"), ("table-transformer", "DetrFeatureExtractor"), ("timesformer", "VideoMAEFeatureExtractor"), ("tvlt", "TvltFeatureExtractor"), ("unispeech", "Wav2Vec2FeatureExtractor"), ("unispeech-sat", "Wav2Vec2FeatureExtractor"), ("van", "ConvNextFeatureExtractor"), ("videomae", "VideoMAEFeatureExtractor"), ("vilt", "ViltFeatureExtractor"), ("vit", "ViTFeatureExtractor"), ("vit_mae", "ViTFeatureExtractor"), ("vit_msn", "ViTFeatureExtractor"), ("wav2vec2", "Wav2Vec2FeatureExtractor"), ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"), ("wavlm", "Wav2Vec2FeatureExtractor"), ("whisper", "WhisperFeatureExtractor"), ("xclip", "CLIPFeatureExtractor"), ("yolos", "YolosFeatureExtractor"), ] ) __a = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES) def __snake_case( _lowerCAmelCase ) -> Any: for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items(): if class_name in extractors: snake_case__ : List[Any] = model_type_to_module_name(_lowerCAmelCase ) snake_case__ : List[Any] = importlib.import_module(f".{module_name}" , """transformers.models""" ) try: return getattr(_lowerCAmelCase , _lowerCAmelCase ) except AttributeError: continue for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items(): if getattr(_lowerCAmelCase , """__name__""" , _lowerCAmelCase ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. snake_case__ : str = importlib.import_module("""transformers""" ) if hasattr(_lowerCAmelCase , _lowerCAmelCase ): return getattr(_lowerCAmelCase , _lowerCAmelCase ) return None def __snake_case( _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = False , _lowerCAmelCase = False , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = False , **_lowerCAmelCase , ) -> Tuple: snake_case__ : List[Any] = get_file_from_repo( _lowerCAmelCase , _lowerCAmelCase , cache_dir=_lowerCAmelCase , force_download=_lowerCAmelCase , resume_download=_lowerCAmelCase , proxies=_lowerCAmelCase , use_auth_token=_lowerCAmelCase , revision=_lowerCAmelCase , local_files_only=_lowerCAmelCase , ) if resolved_config_file is None: logger.info( """Could not locate the feature extractor configuration file, will try to use the model config instead.""" ) return {} with open(_lowerCAmelCase , encoding="""utf-8""" ) as reader: return json.load(_lowerCAmelCase ) class UpperCAmelCase_ : """simple docstring""" def __init__( self : Tuple ): raise EnvironmentError( """AutoFeatureExtractor is designed to be instantiated """ """using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.""" ) @classmethod @replace_list_option_in_docstrings(__UpperCAmelCase ) def lowerCamelCase ( cls : int , snake_case_ : Optional[Any] , **snake_case_ : Dict ): snake_case__ : Union[str, Any] = kwargs.pop("""config""" , __UpperCAmelCase ) snake_case__ : Dict = kwargs.pop("""trust_remote_code""" , __UpperCAmelCase ) snake_case__ : Optional[int] = True snake_case__ , snake_case__ : Tuple = FeatureExtractionMixin.get_feature_extractor_dict(__UpperCAmelCase , **__UpperCAmelCase ) snake_case__ : int = config_dict.get("""feature_extractor_type""" , __UpperCAmelCase ) snake_case__ : Optional[Any] = None if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ): snake_case__ : Any = config_dict["""auto_map"""]["""AutoFeatureExtractor"""] # If we don't find the feature extractor class in the feature extractor config, let's try the model config. if feature_extractor_class is None and feature_extractor_auto_map is None: if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): snake_case__ : str = AutoConfig.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase ) # It could be in `config.feature_extractor_type`` snake_case__ : List[Any] = getattr(__UpperCAmelCase , """feature_extractor_type""" , __UpperCAmelCase ) if hasattr(__UpperCAmelCase , """auto_map""" ) and "AutoFeatureExtractor" in config.auto_map: snake_case__ : Any = config.auto_map["""AutoFeatureExtractor"""] if feature_extractor_class is not None: snake_case__ : Tuple = feature_extractor_class_from_name(__UpperCAmelCase ) snake_case__ : Tuple = feature_extractor_auto_map is not None snake_case__ : Union[str, Any] = feature_extractor_class is not None or type(__UpperCAmelCase ) in FEATURE_EXTRACTOR_MAPPING snake_case__ : str = resolve_trust_remote_code( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) if has_remote_code and trust_remote_code: snake_case__ : Union[str, Any] = get_class_from_dynamic_module( __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) snake_case__ : Optional[Any] = kwargs.pop("""code_revision""" , __UpperCAmelCase ) if os.path.isdir(__UpperCAmelCase ): feature_extractor_class.register_for_auto_class() return feature_extractor_class.from_dict(__UpperCAmelCase , **__UpperCAmelCase ) elif feature_extractor_class is not None: return feature_extractor_class.from_dict(__UpperCAmelCase , **__UpperCAmelCase ) # Last try: we use the FEATURE_EXTRACTOR_MAPPING. elif type(__UpperCAmelCase ) in FEATURE_EXTRACTOR_MAPPING: snake_case__ : Optional[Any] = FEATURE_EXTRACTOR_MAPPING[type(__UpperCAmelCase )] return feature_extractor_class.from_dict(__UpperCAmelCase , **__UpperCAmelCase ) raise ValueError( f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a " f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following " f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}" ) @staticmethod def lowerCamelCase ( snake_case_ : Optional[int] , snake_case_ : Tuple ): FEATURE_EXTRACTOR_MAPPING.register(__UpperCAmelCase , __UpperCAmelCase )
354
'''simple docstring''' import math import qiskit def __snake_case( _lowerCAmelCase = 1 , _lowerCAmelCase = 1 , _lowerCAmelCase = 1 ) -> qiskit.result.counts.Counts: if ( isinstance(_lowerCAmelCase , _lowerCAmelCase ) or isinstance(_lowerCAmelCase , _lowerCAmelCase ) or isinstance(_lowerCAmelCase , _lowerCAmelCase ) ): raise TypeError("""inputs must be integers.""" ) if (input_a < 0) or (input_a < 0) or (carry_in < 0): raise ValueError("""inputs must be positive.""" ) if ( (math.floor(_lowerCAmelCase ) != input_a) or (math.floor(_lowerCAmelCase ) != input_a) or (math.floor(_lowerCAmelCase ) != carry_in) ): raise ValueError("""inputs must be exact integers.""" ) if (input_a > 2) or (input_a > 2) or (carry_in > 2): raise ValueError("""inputs must be less or equal to 2.""" ) # build registers snake_case__ : List[str] = qiskit.QuantumRegister(4 , """qr""" ) snake_case__ : Optional[int] = qiskit.ClassicalRegister(2 , """cr""" ) # list the entries snake_case__ : List[Any] = [input_a, input_a, carry_in] snake_case__ : Union[str, Any] = qiskit.QuantumCircuit(_lowerCAmelCase , _lowerCAmelCase ) for i in range(0 , 3 ): if entry[i] == 2: quantum_circuit.h(_lowerCAmelCase ) # for hadamard entries elif entry[i] == 1: quantum_circuit.x(_lowerCAmelCase ) # for 1 entries elif entry[i] == 0: quantum_circuit.i(_lowerCAmelCase ) # for 0 entries # build the circuit quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate quantum_circuit.cx(0 , 1 ) quantum_circuit.ccx(1 , 2 , 3 ) quantum_circuit.cx(1 , 2 ) quantum_circuit.cx(0 , 1 ) quantum_circuit.measure([2, 3] , _lowerCAmelCase ) # measure the last two qbits snake_case__ : int = qiskit.Aer.get_backend("""aer_simulator""" ) snake_case__ : Tuple = qiskit.execute(_lowerCAmelCase , _lowerCAmelCase , shots=1_000 ) return job.result().get_counts(_lowerCAmelCase ) if __name__ == "__main__": print(F"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
43
0
'''simple docstring''' import itertools import string from collections.abc import Generator, Iterable def __snake_case ( UpperCAmelCase_ : Iterable[str] , UpperCAmelCase_ : int ): lowerCamelCase_ = iter(UpperCAmelCase_ ) while True: lowerCamelCase_ = tuple(itertools.islice(UpperCAmelCase_ , UpperCAmelCase_ ) ) if not chunk: return yield chunk def __snake_case ( UpperCAmelCase_ : str ): lowerCamelCase_ = "".join([c.upper() for c in dirty if c in string.ascii_letters] ) lowerCamelCase_ = "" if len(UpperCAmelCase_ ) < 2: return dirty for i in range(len(UpperCAmelCase_ ) - 1 ): clean += dirty[i] if dirty[i] == dirty[i + 1]: clean += "X" clean += dirty[-1] if len(UpperCAmelCase_ ) & 1: clean += "X" return clean def __snake_case ( UpperCAmelCase_ : str ): # I and J are used interchangeably to allow # us to use a 5x5 table (25 letters) lowerCamelCase_ = "ABCDEFGHIKLMNOPQRSTUVWXYZ" # we're using a list instead of a '2d' array because it makes the math # for setting up the table and doing the actual encoding/decoding simpler lowerCamelCase_ = [] # copy key chars into the table if they are in `alphabet` ignoring duplicates for char in key.upper(): if char not in table and char in alphabet: table.append(UpperCAmelCase_ ) # fill the rest of the table in with the remaining alphabet chars for char in alphabet: if char not in table: table.append(UpperCAmelCase_ ) return table def __snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : str ): lowerCamelCase_ = generate_table(UpperCAmelCase_ ) lowerCamelCase_ = prepare_input(UpperCAmelCase_ ) lowerCamelCase_ = "" # https://en.wikipedia.org/wiki/Playfair_cipher#Description for chara, chara in chunker(UpperCAmelCase_ , 2 ): lowerCamelCase_ ,lowerCamelCase_ = divmod(table.index(UpperCAmelCase_ ) , 5 ) lowerCamelCase_ ,lowerCamelCase_ = divmod(table.index(UpperCAmelCase_ ) , 5 ) if rowa == rowa: ciphertext += table[rowa * 5 + (cola + 1) % 5] ciphertext += table[rowa * 5 + (cola + 1) % 5] elif cola == cola: ciphertext += table[((rowa + 1) % 5) * 5 + cola] ciphertext += table[((rowa + 1) % 5) * 5 + cola] else: # rectangle ciphertext += table[rowa * 5 + cola] ciphertext += table[rowa * 5 + cola] return ciphertext def __snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : str ): lowerCamelCase_ = generate_table(UpperCAmelCase_ ) lowerCamelCase_ = "" # https://en.wikipedia.org/wiki/Playfair_cipher#Description for chara, chara in chunker(UpperCAmelCase_ , 2 ): lowerCamelCase_ ,lowerCamelCase_ = divmod(table.index(UpperCAmelCase_ ) , 5 ) lowerCamelCase_ ,lowerCamelCase_ = divmod(table.index(UpperCAmelCase_ ) , 5 ) if rowa == rowa: plaintext += table[rowa * 5 + (cola - 1) % 5] plaintext += table[rowa * 5 + (cola - 1) % 5] elif cola == cola: plaintext += table[((rowa - 1) % 5) * 5 + cola] plaintext += table[((rowa - 1) % 5) * 5 + cola] else: # rectangle plaintext += table[rowa * 5 + cola] plaintext += table[rowa * 5 + cola] return plaintext
55
class UpperCAmelCase : '''simple docstring''' def __init__( self : Dict ): """simple docstring""" snake_case_ = {} # Mapping from char to TrieNode snake_case_ = False def snake_case__ ( self : Dict , __lowercase : list[str] ): """simple docstring""" for word in words: self.insert(__lowercase ) def snake_case__ ( self : List[str] , __lowercase : str ): """simple docstring""" snake_case_ = self for char in word: if char not in curr.nodes: snake_case_ = TrieNode() snake_case_ = curr.nodes[char] snake_case_ = True def snake_case__ ( self : List[Any] , __lowercase : str ): """simple docstring""" snake_case_ = self for char in word: if char not in curr.nodes: return False snake_case_ = curr.nodes[char] return curr.is_leaf def snake_case__ ( self : Optional[Any] , __lowercase : str ): """simple docstring""" def _delete(__lowercase : TrieNode , __lowercase : str , __lowercase : int ) -> bool: if index == len(__lowercase ): # If word does not exist if not curr.is_leaf: return False snake_case_ = False return len(curr.nodes ) == 0 snake_case_ = word[index] snake_case_ = curr.nodes.get(__lowercase ) # If char not in current trie node if not char_node: return False # Flag to check if node can be deleted snake_case_ = _delete(__lowercase , __lowercase , index + 1 ) if delete_curr: del curr.nodes[char] return len(curr.nodes ) == 0 return delete_curr _delete(self , __lowercase , 0 ) def lowerCamelCase__ ( _A , _A ): '''simple docstring''' if node.is_leaf: print(_A , end=" " ) for key, value in node.nodes.items(): print_words(_A , word + key ) def lowerCamelCase__ ( ): '''simple docstring''' snake_case_ = "banana bananas bandana band apple all beast".split() snake_case_ = TrieNode() root.insert_many(_A ) # print_words(root, "") assert all(root.find(_A ) for word in words ) assert root.find("banana" ) assert not root.find("bandanas" ) assert not root.find("apps" ) assert root.find("apple" ) assert root.find("all" ) root.delete("all" ) assert not root.find("all" ) root.delete("banana" ) assert not root.find("banana" ) assert root.find("bananas" ) return True def lowerCamelCase__ ( _A , _A ): '''simple docstring''' print(str(_A ) , "works!" if passes else "doesn't work :(" ) def lowerCamelCase__ ( ): '''simple docstring''' assert test_trie() def lowerCamelCase__ ( ): '''simple docstring''' print_results("Testing trie functionality" , test_trie() ) if __name__ == "__main__": main()
187
0
def A ( _lowerCamelCase , _lowerCamelCase ): '''simple docstring''' return int(input_a == input_a == 0 ) def A ( ): '''simple docstring''' print("Truth Table of NOR Gate:" ) print("| Input 1 | Input 2 | Output |" ) print(F"| 0 | 0 | {nor_gate(0 , 0 )} |" ) print(F"| 0 | 1 | {nor_gate(0 , 1 )} |" ) print(F"| 1 | 0 | {nor_gate(1 , 0 )} |" ) print(F"| 1 | 1 | {nor_gate(1 , 1 )} |" ) if __name__ == "__main__": import doctest doctest.testmod() main()
365
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available, is_vision_available, ) _snake_case = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = ["BeitFeatureExtractor"] _snake_case = ["BeitImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST", "BeitForImageClassification", "BeitForMaskedImageModeling", "BeitForSemanticSegmentation", "BeitModel", "BeitPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ "FlaxBeitForImageClassification", "FlaxBeitForMaskedImageModeling", "FlaxBeitModel", "FlaxBeitPreTrainedModel", ] if TYPE_CHECKING: from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_beit import BeitFeatureExtractor from .image_processing_beit import BeitImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_beit import ( BEIT_PRETRAINED_MODEL_ARCHIVE_LIST, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, BeitPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_beit import ( FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel, FlaxBeitPreTrainedModel, ) else: import sys _snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
300
0
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING __A : List[str] = logging.get_logger(__name__) __A : int = { '''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''', } class _UpperCAmelCase ( _A ): SCREAMING_SNAKE_CASE_ : int = "deta" SCREAMING_SNAKE_CASE_ : List[str] = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self : Union[str, Any] , A : Optional[int]=None , A : Union[str, Any]=9_00 , A : Tuple=20_48 , A : int=6 , A : str=20_48 , A : Any=8 , A : Optional[int]=6 , A : Dict=10_24 , A : str=8 , A : Dict=0.0 , A : Union[str, Any]=True , A : List[Any]="relu" , A : Tuple=2_56 , A : Optional[int]=0.1 , A : int=0.0 , A : str=0.0 , A : List[Any]=0.02 , A : Union[str, Any]=1.0 , A : str=True , A : str=False , A : Optional[int]="sine" , A : Optional[Any]=5 , A : str=4 , A : Union[str, Any]=4 , A : Tuple=True , A : Union[str, Any]=3_00 , A : Optional[Any]=True , A : int=True , A : Dict=1 , A : Tuple=5 , A : Optional[Any]=2 , A : Optional[Any]=1 , A : Any=1 , A : int=5 , A : Optional[Any]=2 , A : List[str]=0.1 , A : Dict=0.25 , **A : Tuple , ) -> Dict: if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) lowercase_ : Optional[int] = CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''] ) else: if isinstance(A , A ): lowercase_ : List[str] = backbone_config.pop('''model_type''' ) lowercase_ : List[str] = CONFIG_MAPPING[backbone_model_type] lowercase_ : Union[str, Any] = config_class.from_dict(A ) lowercase_ : List[str] = backbone_config lowercase_ : Optional[int] = num_queries lowercase_ : str = max_position_embeddings lowercase_ : Any = d_model lowercase_ : Optional[Any] = encoder_ffn_dim lowercase_ : List[str] = encoder_layers lowercase_ : Dict = encoder_attention_heads lowercase_ : int = decoder_ffn_dim lowercase_ : List[Any] = decoder_layers lowercase_ : int = decoder_attention_heads lowercase_ : Optional[Any] = dropout lowercase_ : Tuple = attention_dropout lowercase_ : str = activation_dropout lowercase_ : List[str] = activation_function lowercase_ : int = init_std lowercase_ : Dict = init_xavier_std lowercase_ : List[Any] = encoder_layerdrop lowercase_ : str = auxiliary_loss lowercase_ : Dict = position_embedding_type # deformable attributes lowercase_ : Union[str, Any] = num_feature_levels lowercase_ : Optional[int] = encoder_n_points lowercase_ : Dict = decoder_n_points lowercase_ : Tuple = two_stage lowercase_ : Union[str, Any] = two_stage_num_proposals lowercase_ : Tuple = with_box_refine lowercase_ : Optional[int] = assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError('''If two_stage is True, with_box_refine must be True.''' ) # Hungarian matcher lowercase_ : Optional[Any] = class_cost lowercase_ : Dict = bbox_cost lowercase_ : Optional[int] = giou_cost # Loss coefficients lowercase_ : Optional[int] = mask_loss_coefficient lowercase_ : Optional[Any] = dice_loss_coefficient lowercase_ : Dict = bbox_loss_coefficient lowercase_ : int = giou_loss_coefficient lowercase_ : Union[str, Any] = eos_coefficient lowercase_ : Dict = focal_alpha super().__init__(is_encoder_decoder=A , **A ) @property def A ( self : Any ) -> int: return self.encoder_attention_heads @property def A ( self : Optional[int] ) -> int: return self.d_model def A ( self : List[Any] ) -> Dict: lowercase_ : str = 
copy.deepcopy(self.__dict__ ) lowercase_ : Union[str, Any] = self.backbone_config.to_dict() lowercase_ : List[Any] = self.__class__.model_type return output
33
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available, is_torch_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow if is_tf_available(): from transformers import ( AutoConfig, BertConfig, GPTaConfig, TaConfig, TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST if is_torch_available(): from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelWithLMHead, BertForMaskedLM, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertModel, GPTaLMHeadModel, RobertaForMaskedLM, TaForConditionalGeneration, ) @is_pt_tf_cross_test class A__ ( unittest.TestCase): @slow def __lowerCamelCase ( self ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: __lowerCAmelCase : Tuple = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = TFAutoModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = AutoModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: __lowerCAmelCase : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = TFAutoModelForPreTraining.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : str = AutoModelForPreTraining.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = TFAutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase : List[str] = 
TFAutoModelForCausalLM.from_pretrained( _SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = AutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase : List[Any] = AutoModelForCausalLM.from_pretrained( _SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : int = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase : Optional[Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[Any] = TFAutoModelForMaskedLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase : int = TFAutoModelForMaskedLM.from_pretrained( _SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Union[str, Any] = AutoModelForMaskedLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase : str = AutoModelForMaskedLM.from_pretrained( _SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained( _SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : int = AutoModelForSeqaSeqLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase : Dict = AutoModelForSeqaSeqLM.from_pretrained( _SCREAMING_SNAKE_CASE , 
output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: __lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = TFAutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: __lowerCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[Any] = TFAutoModelForQuestionAnswering.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Union[str, Any] = AutoModelForQuestionAnswering.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : List[Any] = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 ) __lowerCAmelCase : Tuple = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 ) def __lowerCamelCase ( self ): __lowerCAmelCase : int = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 ) __lowerCAmelCase : Tuple = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 )
86
0
import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaImgaImgPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class UpperCAmelCase_ ( a , unittest.TestCase): lowerCamelCase__ = KandinskyVaaImgaImgPipeline lowerCamelCase__ = ['''image_embeds''', '''negative_image_embeds''', '''image'''] lowerCamelCase__ = [ '''image_embeds''', '''negative_image_embeds''', '''image''', ] lowerCamelCase__ = [ '''generator''', '''height''', '''width''', '''strength''', '''guidance_scale''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] lowerCamelCase__ = False @property def snake_case__ ( self): '''simple docstring''' return 32 @property def snake_case__ ( self): '''simple docstring''' return 32 @property def snake_case__ ( self): '''simple docstring''' return self.time_input_dim @property def snake_case__ ( self): '''simple docstring''' return self.time_input_dim * 4 @property def snake_case__ ( self): '''simple docstring''' return 100 @property def snake_case__ ( self): '''simple docstring''' torch.manual_seed(0) _lowerCAmelCase : Dict = { "in_channels": 4, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } _lowerCAmelCase : Dict = UNetaDConditionModel(**lowerCAmelCase__) return model @property def snake_case__ ( self): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def snake_case__ ( self): '''simple docstring''' torch.manual_seed(0) _lowerCAmelCase : Union[str, Any] = VQModel(**self.dummy_movq_kwargs) return model def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.dummy_unet _lowerCAmelCase : str = self.dummy_movq _lowerCAmelCase : Optional[int] = { "num_train_timesteps": 1000, "beta_schedule": "linear", "beta_start": 0.00_085, "beta_end": 0.012, "clip_sample": False, "set_alpha_to_one": False, "steps_offset": 0, "prediction_type": "epsilon", "thresholding": False, } _lowerCAmelCase : Union[str, Any] = DDIMScheduler(**lowerCAmelCase__) _lowerCAmelCase : List[Any] = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def snake_case__ ( self, __a, __a=0): '''simple docstring''' _lowerCAmelCase : int = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(lowerCAmelCase__)).to(lowerCAmelCase__) _lowerCAmelCase : List[str] = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to( lowerCAmelCase__) # create init_image _lowerCAmelCase : Optional[int] = floats_tensor((1, 3, 64, 64), rng=random.Random(lowerCAmelCase__)).to(lowerCAmelCase__) _lowerCAmelCase : List[Any] = image.cpu().permute(0, 2, 3, 1)[0] _lowerCAmelCase : Dict = Image.fromarray(np.uinta(lowerCAmelCase__)).convert("RGB").resize((256, 256)) if str(lowerCAmelCase__).startswith("mps"): _lowerCAmelCase : int = torch.manual_seed(lowerCAmelCase__) else: _lowerCAmelCase : int = torch.Generator(device=lowerCAmelCase__).manual_seed(lowerCAmelCase__) _lowerCAmelCase : Tuple = { "image": init_image, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 64, "num_inference_steps": 10, "guidance_scale": 7.0, "strength": 0.2, "output_type": "np", } return inputs def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : List[str] = "cpu" _lowerCAmelCase : List[Any] = self.get_dummy_components() _lowerCAmelCase : Optional[int] = self.pipeline_class(**lowerCAmelCase__) _lowerCAmelCase : Optional[Any] = pipe.to(lowerCAmelCase__) pipe.set_progress_bar_config(disable=lowerCAmelCase__) _lowerCAmelCase : int = pipe(**self.get_dummy_inputs(lowerCAmelCase__)) _lowerCAmelCase : Any = output.images _lowerCAmelCase : str = pipe( **self.get_dummy_inputs(lowerCAmelCase__), return_dict=lowerCAmelCase__, )[0] _lowerCAmelCase : List[Any] = image[0, -3:, -3:, -1] _lowerCAmelCase : List[str] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _lowerCAmelCase : List[str] = np.array( [0.6_199_778, 0.63_984_406, 0.46_145_785, 0.62_944_984, 0.5_622_215, 0.47_306_132, 0.47_441_456, 0.4_607_606, 0.48_719_263]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1E-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @slow @require_torch_gpu class UpperCAmelCase_ ( unittest.TestCase): def snake_case__ ( self): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_img2img_frog.npy") _lowerCAmelCase : Optional[int] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png") _lowerCAmelCase : List[str] = "A red cartoon frog, 4k" _lowerCAmelCase : str = KandinskyVaaPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.floataa) pipe_prior.to(lowerCAmelCase__) _lowerCAmelCase : Tuple = KandinskyVaaImgaImgPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.floataa) _lowerCAmelCase : List[Any] = pipeline.to(lowerCAmelCase__) pipeline.set_progress_bar_config(disable=lowerCAmelCase__) _lowerCAmelCase : Tuple = torch.Generator(device="cpu").manual_seed(0) _lowerCAmelCase : Optional[Any] = pipe_prior( lowerCAmelCase__, generator=lowerCAmelCase__, num_inference_steps=5, negative_prompt="", ).to_tuple() _lowerCAmelCase : Dict = pipeline( image=lowerCAmelCase__, image_embeds=lowerCAmelCase__, negative_image_embeds=lowerCAmelCase__, generator=lowerCAmelCase__, num_inference_steps=100, height=768, width=768, strength=0.2, output_type="np", ) _lowerCAmelCase : Optional[int] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowerCAmelCase__, lowerCAmelCase__)
367
import itertools from dataclasses import dataclass from typing import Optional import pandas as pd import pyarrow as pa import datasets from datasets.table import table_cast @dataclass class UpperCAmelCase_ ( datasets.BuilderConfig): lowerCamelCase__ = None class UpperCAmelCase_ ( datasets.ArrowBasedBuilder): lowerCamelCase__ = PandasConfig def snake_case__ ( self): '''simple docstring''' return datasets.DatasetInfo(features=self.config.features) def snake_case__ ( self, __a): '''simple docstring''' if not self.config.data_files: raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}") _lowerCAmelCase : str = dl_manager.download_and_extract(self.config.data_files) if isinstance(__a, (str, list, tuple)): _lowerCAmelCase : str = data_files if isinstance(__a, __a): _lowerCAmelCase : int = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive _lowerCAmelCase : Union[str, Any] = [dl_manager.iter_files(__a) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})] _lowerCAmelCase : str = [] for split_name, files in data_files.items(): if isinstance(__a, __a): _lowerCAmelCase : Optional[Any] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive _lowerCAmelCase : str = [dl_manager.iter_files(__a) for file in files] splits.append(datasets.SplitGenerator(name=__a, gen_kwargs={"files": files})) return splits def snake_case__ ( self, __a): '''simple docstring''' if self.config.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example _lowerCAmelCase : str = table_cast(__a, self.config.features.arrow_schema) return pa_table def snake_case__ ( self, __a): '''simple docstring''' for i, file in enumerate(itertools.chain.from_iterable(__a)): with open(__a, "rb") as f: _lowerCAmelCase : Optional[Any] = pa.Table.from_pandas(pd.read_pickle(__a)) yield i, self._cast_table(__a)
'''simple docstring''' import mpmath # for roots of unity import numpy as np class a_ : '''simple docstring''' def __init__( self , A=None , A=None ) -> Dict: _SCREAMING_SNAKE_CASE = list(poly_a or [0] )[:] _SCREAMING_SNAKE_CASE = list(poly_b or [0] )[:] # Remove leading zero coefficients while self.polyA[-1] == 0: self.polyA.pop() _SCREAMING_SNAKE_CASE = len(self.polyA ) while self.polyB[-1] == 0: self.polyB.pop() _SCREAMING_SNAKE_CASE = len(self.polyB ) # Add 0 to make lengths equal a power of 2 _SCREAMING_SNAKE_CASE = int( 2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) ) while len(self.polyA ) < self.c_max_length: self.polyA.append(0 ) while len(self.polyB ) < self.c_max_length: self.polyB.append(0 ) # A complex root used for the fourier transform _SCREAMING_SNAKE_CASE = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) ) # The product _SCREAMING_SNAKE_CASE = self.__multiply() def snake_case_( self , A ) -> Dict: _SCREAMING_SNAKE_CASE = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB] # Corner case if len(UpperCAmelCase__ ) <= 1: return dft[0] # _SCREAMING_SNAKE_CASE = self.c_max_length // 2 while next_ncol > 0: _SCREAMING_SNAKE_CASE = [[] for i in range(UpperCAmelCase__ )] _SCREAMING_SNAKE_CASE = self.root**next_ncol # First half of next step _SCREAMING_SNAKE_CASE = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(UpperCAmelCase__ ): new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] ) current_root *= root # Second half of next step _SCREAMING_SNAKE_CASE = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(UpperCAmelCase__ ): new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] ) current_root *= root # Update _SCREAMING_SNAKE_CASE = new_dft _SCREAMING_SNAKE_CASE = next_ncol // 2 return dft[0] def snake_case_( self ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE = self.__dft("""A""" ) _SCREAMING_SNAKE_CASE = self.__dft("""B""" ) _SCREAMING_SNAKE_CASE = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]] del dft_a del dft_b # Corner Case if len(inverce_c[0] ) <= 1: return inverce_c[0] # Inverse DFT _SCREAMING_SNAKE_CASE = 2 while next_ncol <= self.c_max_length: _SCREAMING_SNAKE_CASE = [[] for i in range(UpperCAmelCase__ )] _SCREAMING_SNAKE_CASE = self.root ** (next_ncol // 2) _SCREAMING_SNAKE_CASE = 1 # First half of next step for j in range(self.c_max_length // next_ncol ): for i in range(next_ncol // 2 ): # Even positions new_inverse_c[i].append( ( inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol] ) / 2 ) # Odd positions new_inverse_c[i + next_ncol // 2].append( ( inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol] ) / (2 * current_root) ) current_root *= root # Update _SCREAMING_SNAKE_CASE = new_inverse_c next_ncol *= 2 # Unpack _SCREAMING_SNAKE_CASE = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c] # Remove leading 0's while inverce_c[-1] == 0: inverce_c.pop() return inverce_c def __str__( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE = """A = """ + """ + """.join( f'{coef}*x^{i}' for coef, i in enumerate(self.polyA[: self.len_A] ) ) _SCREAMING_SNAKE_CASE = """B = """ + """ + """.join( f'{coef}*x^{i}' for coef, i in enumerate(self.polyB[: self.len_B] ) ) _SCREAMING_SNAKE_CASE = """A*B = """ + """ + """.join( f'{coef}*x^{i}' for coef, i in enumerate(self.product ) ) return f'{a}\n{b}\n{c}' # Unit tests if __name__ == "__main__": import doctest doctest.testmod()
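# A quick cross-check of the FFT-based product, independent of the class
# above: numpy's convolution computes the same coefficient sequence
# (coefficients listed lowest degree first).
import numpy as np

poly_a = [1, 2, 3]  # 1 + 2x + 3x^2
poly_b = [4, 5]     # 4 + 5x
assert list(np.convolve(poly_a, poly_b)) == [4, 13, 22, 15]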
# Author: OMKAR PATHAK, Nwachukwu Chidiebere # Use a Python dictionary to construct the graph. from __future__ import annotations from pprint import pformat from typing import Generic, TypeVar UpperCAmelCase__ : Tuple = TypeVar("""T""") class a__ ( Generic[T] ): """simple docstring""" def __init__( self : str , UpperCAmelCase__ : bool = True ) ->None: """simple docstring""" SCREAMING_SNAKE_CASE : dict[T, list[T]] = {} # dictionary of lists SCREAMING_SNAKE_CASE : Dict = directed def _lowercase ( self : int , UpperCAmelCase__ : T , UpperCAmelCase__ : T ) ->GraphAdjacencyList[T]: """simple docstring""" if not self.directed: # For undirected graphs # if both source vertex and destination vertex are both present in the # adjacency list, add destination vertex to source vertex list of adjacent # vertices and add source vertex to destination vertex list of adjacent # vertices. if source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(UpperCAmelCase__ ) self.adj_list[destination_vertex].append(UpperCAmelCase__ ) # if only source vertex is present in adjacency list, add destination vertex # to source vertex list of adjacent vertices, then create a new vertex with # destination vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(UpperCAmelCase__ ) SCREAMING_SNAKE_CASE : int = [source_vertex] # if only destination vertex is present in adjacency list, add source vertex # to destination vertex list of adjacent vertices, then create a new vertex # with source vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. elif destination_vertex in self.adj_list: self.adj_list[destination_vertex].append(UpperCAmelCase__ ) SCREAMING_SNAKE_CASE : str = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and assign a list # containing the destination vertex as it's first adjacent vertex also # create a new vertex with destination vertex as key and assign a list # containing the source vertex as it's first adjacent vertex. else: SCREAMING_SNAKE_CASE : Tuple = [destination_vertex] SCREAMING_SNAKE_CASE : str = [source_vertex] else: # For directed graphs # if both source vertex and destination vertex are present in adjacency # list, add destination vertex to source vertex list of adjacent vertices. if source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(UpperCAmelCase__ ) # if only source vertex is present in adjacency list, add destination # vertex to source vertex list of adjacent vertices and create a new vertex # with destination vertex as key, which has no adjacent vertex elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(UpperCAmelCase__ ) SCREAMING_SNAKE_CASE : Any = [] # if only destination vertex is present in adjacency list, create a new # vertex with source vertex as key and assign a list containing destination # vertex as first adjacent vertex elif destination_vertex in self.adj_list: SCREAMING_SNAKE_CASE : Optional[Any] = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and a list containing # destination vertex as it's first adjacent vertex. 
# Then create a new vertex with destination vertex as key, which has no adjacent vertex else: SCREAMING_SNAKE_CASE : Dict = [destination_vertex] SCREAMING_SNAKE_CASE : List[Any] = [] return self def __repr__( self : Dict ) ->str: """simple docstring""" return pformat(self.adj_list )
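# A compact illustration of the adjacency-list bookkeeping implemented above,
# written against a plain defaultdict so it runs standalone: every add_edge
# call must also make sure the destination vertex exists in the mapping.
from collections import defaultdict

adj = defaultdict(list)

def add_edge(u, v, directed=False):
    adj[u].append(v)
    adj[v]  # touch the destination so isolated sinks still appear
    if not directed:
        adj[v].append(u)

add_edge("a", "b")
add_edge("b", "c", directed=True)
assert dict(adj) == {"a": ["b"], "b": ["a", "c"], "c": []}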
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging A : str = logging.get_logger(__name__) A : Tuple = {'vocab_file': 'sentencepiece.bpe.model'} A : Union[str, Any] = { 'vocab_file': { 'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model', } } A : List[str] = { 'camembert-base': 5_1_2, } A : int = '▁' class __A( a ): snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = ['''input_ids''', '''attention_mask'''] def __init__( self , _snake_case , _snake_case="<s>" , _snake_case="</s>" , _snake_case="</s>" , _snake_case="<s>" , _snake_case="<unk>" , _snake_case="<pad>" , _snake_case="<mask>" , _snake_case=["<s>NOTUSED", "</s>NOTUSED"] , _snake_case = None , **_snake_case , ) -> None: '''simple docstring''' __a = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case ) if isinstance(_snake_case , _snake_case ) else mask_token __a = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_snake_case , eos_token=_snake_case , unk_token=_snake_case , sep_token=_snake_case , cls_token=_snake_case , pad_token=_snake_case , mask_token=_snake_case , additional_special_tokens=_snake_case , sp_model_kwargs=self.sp_model_kwargs , **_snake_case , ) __a = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_snake_case ) ) __a = vocab_file # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual # sentencepiece vocabulary (this is the case for <s> and </s> __a = {'''<s>NOTUSED''': 0, '''<pad>''': 1, '''</s>NOTUSED''': 2, '''<unk>''': 3} __a = len(self.fairseq_tokens_to_ids ) __a = len(self.sp_model ) + len(self.fairseq_tokens_to_ids ) __a = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __a = [self.cls_token_id] __a = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None , _snake_case = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_snake_case , token_ids_a=_snake_case , already_has_special_tokens=_snake_case ) if token_ids_a is None: return [1] + ([0] * len(_snake_case )) + [1] return [1] + ([0] * len(_snake_case )) + [1, 1] + ([0] * len(_snake_case )) + [1] def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None ) -> List[int]: '''simple docstring''' __a = [self.sep_token_id] __a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]: '''simple docstring''' return len(self.fairseq_tokens_to_ids ) + len(self.sp_model ) def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]: '''simple docstring''' __a = {self.convert_ids_to_tokens(_snake_case ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> List[str]: '''simple docstring''' return self.sp_model.encode(_snake_case , out_type=_snake_case ) 
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Union[str, Any]: '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] elif self.sp_model.PieceToId(_snake_case ) == 0: # Convert sentence piece unk token to fairseq unk token index return self.unk_token_id return self.fairseq_offset + self.sp_model.PieceToId(_snake_case ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> List[Any]: '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Dict: '''simple docstring''' __a = [] __a = '''''' __a = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_snake_case ) + token __a = True __a = [] else: current_sub_tokens.append(_snake_case ) __a = False out_string += self.sp_model.decode(_snake_case ) return out_string.strip() def __getstate__( self ) -> List[Any]: '''simple docstring''' __a = self.__dict__.copy() __a = None return state def __setstate__( self , _snake_case ) -> Optional[int]: '''simple docstring''' __a = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): __a = {} __a = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(_snake_case ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return __a = os.path.join( _snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _snake_case ) elif not os.path.isfile(self.vocab_file ): with open(_snake_case , '''wb''' ) as fi: __a = self.sp_model.serialized_model_proto() fi.write(_snake_case ) return (out_vocab_file,)
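# The id mapping above reduces to a fixed offset. A sketch of the arithmetic,
# assuming sentencepiece piece id 0 is its unknown token and four
# fairseq-reserved ids sit in front of the sentencepiece vocabulary, as in the
# constructor above.
fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
fairseq_offset = len(fairseq_tokens_to_ids)  # 4

def sp_id_to_fairseq_id(sp_id: int) -> int:
    # piece id 0 is sentencepiece's unk, which maps to the fairseq <unk> id
    return fairseq_tokens_to_ids["<unk>"] if sp_id == 0 else sp_id + fairseq_offset

assert sp_id_to_fairseq_id(0) == 3
assert sp_id_to_fairseq_id(10) == 14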
import inspect import os import unittest from dataclasses import dataclass import torch from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs from accelerate.state import AcceleratorState from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu from accelerate.utils import KwargsHandler @dataclass class __A( a ): snake_case_ = 0 snake_case_ = False snake_case_ = 3.0 class __A( unittest.TestCase ): def SCREAMING_SNAKE_CASE_ ( self ) -> Dict: '''simple docstring''' self.assertDictEqual(MockClass().to_kwargs() , {} ) self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} ) self.assertDictEqual(MockClass(a=2 , b=_snake_case ).to_kwargs() , {'''a''': 2, '''b''': True} ) self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'''a''': 2, '''c''': 2.25} ) @require_cuda def SCREAMING_SNAKE_CASE_ ( self ) -> str: '''simple docstring''' __a = GradScalerKwargs(init_scale=1_024 , growth_factor=2 ) AcceleratorState._reset_state() __a = Accelerator(mixed_precision='''fp16''' , kwargs_handlers=[scaler_handler] ) print(accelerator.use_fpaa ) __a = accelerator.scaler # Check the kwargs have been applied self.assertEqual(scaler._init_scale , 1024.0 ) self.assertEqual(scaler._growth_factor , 2.0 ) # Check the other values are at the default self.assertEqual(scaler._backoff_factor , 0.5 ) self.assertEqual(scaler._growth_interval , 2_000 ) self.assertEqual(scaler._enabled , _snake_case ) @require_multi_gpu def SCREAMING_SNAKE_CASE_ ( self ) -> int: '''simple docstring''' __a = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )] execute_subprocess_async(_snake_case , env=os.environ.copy() ) if __name__ == "__main__": A : List[str] = DistributedDataParallelKwargs(bucket_cap_mb=1_5, find_unused_parameters=True) A : Optional[Any] = Accelerator(kwargs_handlers=[ddp_scaler]) A : int = torch.nn.Linear(1_0_0, 2_0_0) A : Optional[int] = accelerator.prepare(model) # Check the values changed in kwargs A : List[Any] = '' A : Tuple = model.bucket_bytes_cap // (1_0_2_4 * 1_0_2_4) if observed_bucket_cap_map != 1_5: error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n" if model.find_unused_parameters is not True: error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n" # Check the values of the defaults if model.dim != 0: error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n" if model.broadcast_buffers is not True: error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n" if model.gradient_as_bucket_view is not False: error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n" # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
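# The handler pattern tested above can be reproduced without accelerate; a
# sketch assuming to_kwargs returns only the fields that differ from their
# dataclass defaults, which is the behaviour the first test relies on.
from dataclasses import dataclass, fields

@dataclass
class MiniHandler:
    a: int = 0
    b: bool = False

    def to_kwargs(self):
        return {
            f.name: getattr(self, f.name)
            for f in fields(self)
            if getattr(self, f.name) != f.default
        }

assert MiniHandler().to_kwargs() == {}
assert MiniHandler(a=2).to_kwargs() == {"a": 2}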
import pickle import numpy as np from matplotlib import pyplot as plt class A__ : def __init__( self , A_ , A_ , A_ , A_ , A_ , A_=0.2 , A_=0.2 ): '''simple docstring''' UpperCamelCase : int = bp_numa UpperCamelCase : int = bp_numa UpperCamelCase : List[Any] = bp_numa UpperCamelCase : Optional[int] = conva_get[:2] UpperCamelCase : Optional[Any] = conva_get[2] UpperCamelCase : Dict = size_pa UpperCamelCase : Union[str, Any] = rate_w UpperCamelCase : Dict = rate_t UpperCamelCase : Union[str, Any] = [ np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] UpperCamelCase : Any = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) UpperCamelCase : List[Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) UpperCamelCase : Optional[Any] = -2 * np.random.rand(self.conva[1] ) + 1 UpperCamelCase : Any = -2 * np.random.rand(self.num_bpa ) + 1 UpperCamelCase : int = -2 * np.random.rand(self.num_bpa ) + 1 def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : Optional[int] = { "num_bp1": self.num_bpa, "num_bp2": self.num_bpa, "num_bp3": self.num_bpa, "conv1": self.conva, "step_conv1": self.step_conva, "size_pooling1": self.size_poolinga, "rate_weight": self.rate_weight, "rate_thre": self.rate_thre, "w_conv1": self.w_conva, "wkj": self.wkj, "vji": self.vji, "thre_conv1": self.thre_conva, "thre_bp2": self.thre_bpa, "thre_bp3": self.thre_bpa, } with open(A_ , "wb" ) as f: pickle.dump(A_ , A_ ) print(F"""Model saved: {save_path}""" ) @classmethod def __UpperCamelCase( cls , A_ ): '''simple docstring''' with open(A_ , "rb" ) as f: UpperCamelCase : Optional[Any] = pickle.load(A_ ) # noqa: S301 UpperCamelCase : List[Any] = model_dic.get("conv1" ) conv_get.append(model_dic.get("step_conv1" ) ) UpperCamelCase : Union[str, Any] = model_dic.get("size_pooling1" ) UpperCamelCase : List[Any] = model_dic.get("num_bp1" ) UpperCamelCase : Dict = model_dic.get("num_bp2" ) UpperCamelCase : Dict = model_dic.get("num_bp3" ) UpperCamelCase : Dict = model_dic.get("rate_weight" ) UpperCamelCase : str = model_dic.get("rate_thre" ) # create model instance UpperCamelCase : Any = CNN(A_ , A_ , A_ , A_ , A_ , A_ , A_ ) # modify model parameter UpperCamelCase : str = model_dic.get("w_conv1" ) UpperCamelCase : Optional[Any] = model_dic.get("wkj" ) UpperCamelCase : int = model_dic.get("vji" ) UpperCamelCase : Any = model_dic.get("thre_conv1" ) UpperCamelCase : Optional[int] = model_dic.get("thre_bp2" ) UpperCamelCase : Union[str, Any] = model_dic.get("thre_bp3" ) return conv_ins def __UpperCamelCase( self , A_ ): '''simple docstring''' return 1 / (1 + np.exp(-1 * x )) def __UpperCamelCase( self , A_ ): '''simple docstring''' return round(A_ , 3 ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : str = convs[0] UpperCamelCase : Optional[Any] = convs[1] UpperCamelCase : Optional[Any] = np.shape(A_ )[0] # get the data slice of original image data, data_focus UpperCamelCase : List[str] = [] for i_focus in range(0 , size_data - size_conv + 1 , A_ ): for j_focus in range(0 , size_data - size_conv + 1 , A_ ): UpperCamelCase : Union[str, Any] = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(A_ ) # calculate the feature map of every single kernel, and saved as list of matrix UpperCamelCase : int = [] UpperCamelCase : Optional[Any] = int((size_data - size_conv) / conv_step + 1 ) for i_map in range(A_ ): UpperCamelCase : str = [] for i_focus in range(len(A_ ) ): 
UpperCamelCase : List[Any] = ( np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) ) - thre_convs[i_map] ) featuremap.append(self.sig(A_ ) ) UpperCamelCase : Optional[int] = np.asmatrix(A_ ).reshape( A_ , A_ ) data_featuremap.append(A_ ) # expanding the data slice to One dimenssion UpperCamelCase : List[Any] = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(A_ ) ) UpperCamelCase : Tuple = np.asarray(A_ ) return focus_list, data_featuremap def __UpperCamelCase( self , A_ , A_ , A_="average_pool" ): '''simple docstring''' UpperCamelCase : Any = len(featuremaps[0] ) UpperCamelCase : str = int(size_map / size_pooling ) UpperCamelCase : Optional[int] = [] for i_map in range(len(A_ ) ): UpperCamelCase : Tuple = featuremaps[i_map] UpperCamelCase : Any = [] for i_focus in range(0 , A_ , A_ ): for j_focus in range(0 , A_ , A_ ): UpperCamelCase : int = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(A_ ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(A_ ) ) UpperCamelCase : Optional[Any] = np.asmatrix(A_ ).reshape(A_ , A_ ) featuremap_pooled.append(A_ ) return featuremap_pooled def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : List[Any] = [] for i in range(len(A_ ) ): UpperCamelCase : List[Any] = np.shape(data[i] ) UpperCamelCase : str = data[i].reshape(1 , shapes[0] * shapes[1] ) UpperCamelCase : Optional[int] = data_listed.getA().tolist()[0] data_expanded.extend(A_ ) UpperCamelCase : Any = np.asarray(A_ ) return data_expanded def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : List[Any] = np.asarray(A_ ) UpperCamelCase : List[Any] = np.shape(A_ ) UpperCamelCase : Any = data_mat.reshape(1 , shapes[0] * shapes[1] ) return data_expanded def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : int = [] UpperCamelCase : Optional[int] = 0 for i_map in range(A_ ): UpperCamelCase : int = np.ones((size_map, size_map) ) for i in range(0 , A_ , A_ ): for j in range(0 , A_ , A_ ): UpperCamelCase : str = pd_pool[ i_pool ] UpperCamelCase : str = i_pool + 1 UpperCamelCase : str = np.multiply( A_ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) ) pd_all.append(A_ ) return pd_all def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_=bool ): '''simple docstring''' print("----------------------Start Training-------------------------" ) print((" - - Shape: Train_Data ", np.shape(A_ )) ) print((" - - Shape: Teach_Data ", np.shape(A_ )) ) UpperCamelCase : List[str] = 0 UpperCamelCase : Union[str, Any] = [] UpperCamelCase : int = 1_0000 while rp < n_repeat and mse >= error_accuracy: UpperCamelCase : Tuple = 0 print(F"""-------------Learning Time {rp}--------------""" ) for p in range(len(A_ ) ): # print('------------Learning Image: %d--------------'%p) UpperCamelCase : Any = np.asmatrix(datas_train[p] ) UpperCamelCase : List[str] = np.asarray(datas_teach[p] ) UpperCamelCase , UpperCamelCase : Dict = self.convolute( A_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) UpperCamelCase : Tuple = self.pooling(A_ , self.size_poolinga ) UpperCamelCase : int = np.shape(A_ ) UpperCamelCase : List[str] = self._expand(A_ ) UpperCamelCase : Optional[int] = data_bp_input UpperCamelCase : str = np.dot(A_ , self.vji.T ) - self.thre_bpa UpperCamelCase : Optional[int] = self.sig(A_ ) UpperCamelCase : List[Any] = np.dot(A_ , self.wkj.T ) - 
self.thre_bpa UpperCamelCase : Dict = self.sig(A_ ) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- UpperCamelCase : List[Any] = np.multiply( (data_teach - bp_outa) , np.multiply(A_ , (1 - bp_outa) ) ) UpperCamelCase : str = np.multiply( np.dot(A_ , self.wkj ) , np.multiply(A_ , (1 - bp_outa) ) ) UpperCamelCase : Any = np.dot(A_ , self.vji ) UpperCamelCase : Dict = pd_i_all / (self.size_poolinga * self.size_poolinga) UpperCamelCase : List[Any] = pd_conva_pooled.T.getA().tolist() UpperCamelCase : List[Any] = self._calculate_gradient_from_pool( A_ , A_ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , ) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): UpperCamelCase : List[Any] = self._expand_mat(pd_conva_all[k_conv] ) UpperCamelCase : List[Any] = self.rate_weight * np.dot(A_ , A_ ) UpperCamelCase : str = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) UpperCamelCase : Dict = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # all connected layer UpperCamelCase : Optional[Any] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight UpperCamelCase : List[Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight UpperCamelCase : Optional[Any] = self.thre_bpa - pd_k_all * self.rate_thre UpperCamelCase : List[str] = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image UpperCamelCase : List[Any] = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) UpperCamelCase : Any = rp + 1 UpperCamelCase : Union[str, Any] = error_count / patterns all_mse.append(A_ ) def draw_error(): UpperCamelCase : Tuple = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(A_ , "+-" ) plt.plot(A_ , "r--" ) plt.xlabel("Learning Times" ) plt.ylabel("All_mse" ) plt.grid(A_ , alpha=0.5 ) plt.show() print("------------------Training Complished---------------------" ) print((" - - Training epoch: ", rp, F""" - - Mse: {mse:.6f}""") ) if draw_e: draw_error() return mse def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : Optional[Any] = [] print("-------------------Start Testing-------------------------" ) print((" - - Shape: Test_Data ", np.shape(A_ )) ) for p in range(len(A_ ) ): UpperCamelCase : int = np.asmatrix(datas_test[p] ) UpperCamelCase , UpperCamelCase : Any = self.convolute( A_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) UpperCamelCase : List[str] = self.pooling(A_ , self.size_poolinga ) UpperCamelCase : Dict = self._expand(A_ ) UpperCamelCase : List[Any] = data_bp_input UpperCamelCase : Any = bp_outa * self.vji.T - self.thre_bpa UpperCamelCase : List[Any] = self.sig(A_ ) UpperCamelCase : int = bp_outa * self.wkj.T - self.thre_bpa UpperCamelCase : Optional[int] = self.sig(A_ ) produce_out.extend(bp_outa.getA().tolist() ) UpperCamelCase : List[str] = [list(map(self.do_round , A_ ) ) for each in produce_out] return np.asarray(A_ ) def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : Union[str, Any] = np.asmatrix(A_ ) UpperCamelCase , UpperCamelCase : List[Any] = self.convolute( A_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) UpperCamelCase : str = self.pooling(A_ , self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
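# The convolute/pooling helpers above slide fixed-size windows over the input;
# a tiny numeric sketch of the output-size arithmetic they depend on.
size_data, size_conv, conv_step, size_pooling = 6, 3, 1, 2
size_feature_map = (size_data - size_conv) // conv_step + 1  # 4 x 4 feature map
size_pooled = size_feature_map // size_pooling               # 2 x 2 after pooling
assert (size_feature_map, size_pooled) == (4, 2)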
"""simple docstring""" from graphs.minimum_spanning_tree_kruskal import kruskal def lowerCamelCase__ ( ) -> List[Any]: """simple docstring""" _UpperCamelCase = 9 _UpperCamelCase = [ [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2], [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 14], [3, 4, 9], [5, 4, 10], [1, 7, 11], ] _UpperCamelCase = kruskal(__snake_case, __snake_case ) _UpperCamelCase = [ [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9], ] assert sorted(__snake_case ) == sorted(__snake_case )
import unittest from transformers import BertGenerationTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase__ = '▁' lowerCAmelCase__ = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece class a_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' UpperCAmelCase_ = BertGenerationTokenizer UpperCAmelCase_ = False UpperCAmelCase_ = True def __snake_case ( self : int): '''simple docstring''' super().setUp() lowerCAmelCase__ = BertGenerationTokenizer(lowercase__ , keep_accents=lowercase__) tokenizer.save_pretrained(self.tmpdirname) def __snake_case ( self : Tuple): '''simple docstring''' lowerCAmelCase__ = '<s>' lowerCAmelCase__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase__) , lowercase__) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase__) , lowercase__) def __snake_case ( self : Optional[Any]): '''simple docstring''' lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , '<unk>') self.assertEqual(vocab_keys[1] , '<s>') self.assertEqual(vocab_keys[-1] , '<pad>') self.assertEqual(len(lowercase__) , 1_002) def __snake_case ( self : int): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1_000) def __snake_case ( self : Optional[int]): '''simple docstring''' lowerCAmelCase__ = BertGenerationTokenizer(lowercase__ , keep_accents=lowercase__) lowerCAmelCase__ = tokenizer.tokenize('This is a test') self.assertListEqual(lowercase__ , ['▁This', '▁is', '▁a', '▁t', 'est']) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowercase__) , [285, 46, 10, 170, 382] , ) lowerCAmelCase__ = tokenizer.tokenize('I was born in 92000, and this is falsé.') self.assertListEqual( lowercase__ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(lowercase__) self.assertListEqual( lowercase__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(lowercase__) self.assertListEqual( lowercase__ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) @cached_property def __snake_case ( self : str): '''simple docstring''' return BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder') @slow def __snake_case ( self : List[Any]): '''simple docstring''' lowerCAmelCase__ = 'Hello World!' lowerCAmelCase__ = [18_536, 2_260, 101] self.assertListEqual(lowercase__ , self.big_tokenizer.encode(lowercase__)) @slow def __snake_case ( self : int): '''simple docstring''' lowerCAmelCase__ = ( 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . 
Also we will' ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth' ) lowerCAmelCase__ = [ 871, 419, 358, 946, 991, 2_521, 452, 358, 1_357, 387, 7_751, 3_536, 112, 985, 456, 126, 865, 938, 5_400, 5_734, 458, 1_368, 467, 786, 2_462, 5_246, 1_159, 633, 865, 4_519, 457, 582, 852, 2_557, 427, 916, 508, 405, 34_324, 497, 391, 408, 11_342, 1_244, 385, 100, 938, 985, 456, 574, 362, 12_597, 3_200, 3_129, 1_172, ] self.assertListEqual(lowercase__ , self.big_tokenizer.encode(lowercase__)) @require_torch @slow def __snake_case ( self : Dict): '''simple docstring''' import torch from transformers import BertGenerationConfig, BertGenerationEncoder # Build sequence lowerCAmelCase__ = list(self.big_tokenizer.get_vocab().keys())[:10] lowerCAmelCase__ = ' '.join(lowercase__) lowerCAmelCase__ = self.big_tokenizer.encode_plus(lowercase__ , return_tensors='pt' , return_token_type_ids=lowercase__) lowerCAmelCase__ = self.big_tokenizer.batch_encode_plus( [sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=lowercase__) lowerCAmelCase__ = BertGenerationConfig() lowerCAmelCase__ = BertGenerationEncoder(lowercase__) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**lowercase__) model(**lowercase__) @slow def __snake_case ( self : int): '''simple docstring''' lowerCAmelCase__ = {'input_ids': [[39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114], [448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowercase__ , model_name='google/bert_for_seq_generation_L-24_bbc_encoder' , revision='c817d1fd1be2ffa69431227a1fe320544943d4db' , )
import numpy as np def __lowerCamelCase ( lowerCAmelCase__ ): return 1 / (1 + np.exp(-vector )) def __lowerCamelCase ( lowerCAmelCase__ ): return vector * sigmoid(lowerCAmelCase__ ) if __name__ == "__main__": import doctest doctest.testmod()
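# A quick numeric check of the two activations above: sigmoid(0) is 0.5, so
# the swish (sigmoid-weighted linear) value at 0 must be exactly 0.
import numpy as np

v = np.array([-1.0, 0.0, 1.0])
s = 1 / (1 + np.exp(-v))
assert np.isclose(s[1], 0.5)
assert np.isclose((v * s)[1], 0.0)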
"""simple docstring""" import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def lowercase ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[str]=1 ): '''simple docstring''' if n_shave_prefix_segments >= 0: return ".".join(path.split('''.''' )[n_shave_prefix_segments:] ) else: return ".".join(path.split('''.''' )[:n_shave_prefix_segments] ) def lowercase ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Any=0 ): '''simple docstring''' _UpperCAmelCase = [] for old_item in old_list: _UpperCAmelCase = old_item.replace('''in_layers.0''' , '''norm1''' ) _UpperCAmelCase = new_item.replace('''in_layers.2''' , '''conv1''' ) _UpperCAmelCase = new_item.replace('''out_layers.0''' , '''norm2''' ) _UpperCAmelCase = new_item.replace('''out_layers.3''' , '''conv2''' ) _UpperCAmelCase = new_item.replace('''emb_layers.1''' , '''time_emb_proj''' ) _UpperCAmelCase = new_item.replace('''skip_connection''' , '''conv_shortcut''' ) _UpperCAmelCase = shave_segments(SCREAMING_SNAKE_CASE_ , n_shave_prefix_segments=SCREAMING_SNAKE_CASE_ ) mapping.append({'''old''': old_item, '''new''': new_item} ) return mapping def lowercase ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Tuple=0 ): '''simple docstring''' _UpperCAmelCase = [] for old_item in old_list: _UpperCAmelCase = old_item _UpperCAmelCase = new_item.replace('''norm.weight''' , '''group_norm.weight''' ) _UpperCAmelCase = new_item.replace('''norm.bias''' , '''group_norm.bias''' ) _UpperCAmelCase = new_item.replace('''proj_out.weight''' , '''proj_attn.weight''' ) _UpperCAmelCase = new_item.replace('''proj_out.bias''' , '''proj_attn.bias''' ) _UpperCAmelCase = shave_segments(SCREAMING_SNAKE_CASE_ , n_shave_prefix_segments=SCREAMING_SNAKE_CASE_ ) mapping.append({'''old''': old_item, '''new''': new_item} ) return mapping def lowercase ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : int=None , _SCREAMING_SNAKE_CASE : Tuple=None , _SCREAMING_SNAKE_CASE : List[str]=None ): '''simple docstring''' assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): _UpperCAmelCase = old_checkpoint[path] _UpperCAmelCase = old_tensor.shape[0] // 3 _UpperCAmelCase = (-1, channels) if len(old_tensor.shape ) == 3 else (-1) _UpperCAmelCase = old_tensor.shape[0] // config['''num_head_channels'''] // 3 _UpperCAmelCase = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = old_tensor.split(channels // num_heads , dim=1 ) _UpperCAmelCase = query.reshape(SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = key.reshape(SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = value.reshape(SCREAMING_SNAKE_CASE_ ) for path in paths: _UpperCAmelCase = path['''new'''] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here _UpperCAmelCase = new_path.replace('''middle_block.0''' , '''mid_block.resnets.0''' ) _UpperCAmelCase = new_path.replace('''middle_block.1''' , '''mid_block.attentions.0''' ) _UpperCAmelCase = new_path.replace('''middle_block.2''' , '''mid_block.resnets.1''' ) if additional_replacements is not None: for replacement in additional_replacements: _UpperCAmelCase = new_path.replace(replacement['''old'''] , replacement['''new'''] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: _UpperCAmelCase = old_checkpoint[path['''old''']][:, :, 0] else: _UpperCAmelCase = old_checkpoint[path['''old''']] def lowercase ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : str ): '''simple docstring''' _UpperCAmelCase = {} _UpperCAmelCase = checkpoint['''time_embed.0.weight'''] _UpperCAmelCase = checkpoint['''time_embed.0.bias'''] _UpperCAmelCase = checkpoint['''time_embed.2.weight'''] _UpperCAmelCase = checkpoint['''time_embed.2.bias'''] _UpperCAmelCase = checkpoint['''input_blocks.0.0.weight'''] _UpperCAmelCase = checkpoint['''input_blocks.0.0.bias'''] _UpperCAmelCase = checkpoint['''out.0.weight'''] _UpperCAmelCase = checkpoint['''out.0.bias'''] _UpperCAmelCase = checkpoint['''out.2.weight'''] _UpperCAmelCase = checkpoint['''out.2.bias'''] # Retrieves the keys for the input blocks only _UpperCAmelCase = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''input_blocks''' in layer} ) _UpperCAmelCase = { layer_id: [key for key in checkpoint if f'input_blocks.{layer_id}' in key] for layer_id in range(SCREAMING_SNAKE_CASE_ ) } # Retrieves the keys for the middle blocks only _UpperCAmelCase = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''middle_block''' in layer} ) _UpperCAmelCase = { layer_id: [key for key in checkpoint if f'middle_block.{layer_id}' in key] for layer_id in range(SCREAMING_SNAKE_CASE_ ) } # Retrieves the keys for the output blocks only _UpperCAmelCase = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''output_blocks''' in layer} ) _UpperCAmelCase = { layer_id: [key for key in checkpoint if f'output_blocks.{layer_id}' in key] for layer_id in range(SCREAMING_SNAKE_CASE_ ) } for i in range(1 , SCREAMING_SNAKE_CASE_ ): _UpperCAmelCase = (i - 1) // (config['''num_res_blocks'''] + 1) _UpperCAmelCase = (i - 1) % (config['''num_res_blocks'''] + 1) _UpperCAmelCase = [key for key in input_blocks[i] if f'input_blocks.{i}.0' in key] _UpperCAmelCase = [key for key in input_blocks[i] if f'input_blocks.{i}.1' in key] if f'input_blocks.{i}.0.op.weight' in checkpoint: _UpperCAmelCase = 
checkpoint[ f'input_blocks.{i}.0.op.weight' ] _UpperCAmelCase = checkpoint[ f'input_blocks.{i}.0.op.bias' ] continue _UpperCAmelCase = renew_resnet_paths(SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = {'''old''': f'input_blocks.{i}.0', '''new''': f'down_blocks.{block_id}.resnets.{layer_in_block_id}'} _UpperCAmelCase = {'''old''': '''resnets.2.op''', '''new''': '''downsamplers.0.op'''} assign_to_checkpoint( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , additional_replacements=[meta_path, resnet_op] , config=SCREAMING_SNAKE_CASE_ ) if len(SCREAMING_SNAKE_CASE_ ): _UpperCAmelCase = renew_attention_paths(SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = { '''old''': f'input_blocks.{i}.1', '''new''': f'down_blocks.{block_id}.attentions.{layer_in_block_id}', } _UpperCAmelCase = { f'input_blocks.{i}.1.qkv.bias': { '''key''': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias', '''query''': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias', '''value''': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias', }, f'input_blocks.{i}.1.qkv.weight': { '''key''': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight', '''query''': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight', '''value''': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight', }, } assign_to_checkpoint( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , additional_replacements=[meta_path] , attention_paths_to_split=SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ , ) _UpperCAmelCase = middle_blocks[0] _UpperCAmelCase = middle_blocks[1] _UpperCAmelCase = middle_blocks[2] _UpperCAmelCase = renew_resnet_paths(SCREAMING_SNAKE_CASE_ ) assign_to_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = renew_resnet_paths(SCREAMING_SNAKE_CASE_ ) assign_to_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = renew_attention_paths(SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = { '''middle_block.1.qkv.bias''': { '''key''': '''mid_block.attentions.0.key.bias''', '''query''': '''mid_block.attentions.0.query.bias''', '''value''': '''mid_block.attentions.0.value.bias''', }, '''middle_block.1.qkv.weight''': { '''key''': '''mid_block.attentions.0.key.weight''', '''query''': '''mid_block.attentions.0.query.weight''', '''value''': '''mid_block.attentions.0.value.weight''', }, } assign_to_checkpoint( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , attention_paths_to_split=SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ ) for i in range(SCREAMING_SNAKE_CASE_ ): _UpperCAmelCase = i // (config['''num_res_blocks'''] + 1) _UpperCAmelCase = i % (config['''num_res_blocks'''] + 1) _UpperCAmelCase = [shave_segments(SCREAMING_SNAKE_CASE_ , 2 ) for name in output_blocks[i]] _UpperCAmelCase = {} for layer in output_block_layers: _UpperCAmelCase , _UpperCAmelCase = layer.split('''.''' )[0], shave_segments(SCREAMING_SNAKE_CASE_ , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(SCREAMING_SNAKE_CASE_ ) else: _UpperCAmelCase = [layer_name] if len(SCREAMING_SNAKE_CASE_ ) > 1: _UpperCAmelCase = [key for key in output_blocks[i] if f'output_blocks.{i}.0' in key] _UpperCAmelCase = [key for key in output_blocks[i] if f'output_blocks.{i}.1' in key] _UpperCAmelCase = renew_resnet_paths(SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = 
renew_resnet_paths(SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = {'''old''': f'output_blocks.{i}.0', '''new''': f'up_blocks.{block_id}.resnets.{layer_in_block_id}'} assign_to_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , additional_replacements=[meta_path] , config=SCREAMING_SNAKE_CASE_ ) if ["conv.weight", "conv.bias"] in output_block_list.values(): _UpperCAmelCase = list(output_block_list.values() ).index(['''conv.weight''', '''conv.bias'''] ) _UpperCAmelCase = checkpoint[ f'output_blocks.{i}.{index}.conv.weight' ] _UpperCAmelCase = checkpoint[ f'output_blocks.{i}.{index}.conv.bias' ] # Clear attentions as they have been attributed above. if len(SCREAMING_SNAKE_CASE_ ) == 2: _UpperCAmelCase = [] if len(SCREAMING_SNAKE_CASE_ ): _UpperCAmelCase = renew_attention_paths(SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = { '''old''': f'output_blocks.{i}.1', '''new''': f'up_blocks.{block_id}.attentions.{layer_in_block_id}', } _UpperCAmelCase = { f'output_blocks.{i}.1.qkv.bias': { '''key''': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias', '''query''': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias', '''value''': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias', }, f'output_blocks.{i}.1.qkv.weight': { '''key''': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight', '''query''': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight', '''value''': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight', }, } assign_to_checkpoint( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('''qkv''' in key for key in attentions ) else None , config=SCREAMING_SNAKE_CASE_ , ) else: _UpperCAmelCase = renew_resnet_paths(SCREAMING_SNAKE_CASE_ , n_shave_prefix_segments=1 ) for path in resnet_0_paths: _UpperCAmelCase = '''.'''.join(['''output_blocks''', str(SCREAMING_SNAKE_CASE_ ), path['''old''']] ) _UpperCAmelCase = '''.'''.join(['''up_blocks''', str(SCREAMING_SNAKE_CASE_ ), '''resnets''', str(SCREAMING_SNAKE_CASE_ ), path['''new''']] ) _UpperCAmelCase = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": __A : Any = argparse.ArgumentParser() parser.add_argument( "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert." ) parser.add_argument( "--config_file", default=None, type=str, required=True, help="The config json file corresponding to the architecture.", ) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") __A : Dict = parser.parse_args() __A : List[str] = torch.load(args.checkpoint_path) with open(args.config_file) as f: __A : Optional[Any] = json.loads(f.read()) __A : Any = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] __A : int = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: __A : List[str] = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1])) __A : Optional[Any] = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1])) __A : Any = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
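# The subtlest rename in the conversion above is the attention output
# projection: the old checkpoints store it as a 1x1 Conv1d weight of shape
# (out, in, 1), while the diffusers proj_attn is a Linear of shape (out, in).
# A sketch of the slice that performs the conversion; the shapes here are
# hypothetical.
import torch

conv_weight = torch.randn(8, 8, 1)    # Conv1d kernel with kernel_size 1
linear_weight = conv_weight[:, :, 0]  # drop the trailing kernel dimension
assert linear_weight.shape == (8, 8)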
from collections import deque from math import floor from random import random from time import time class a__ : """simple docstring""" def __init__( self ) -> Dict: '''simple docstring''' A__ = {} def UpperCamelCase ( self , lowercase , lowercase , lowercase=1 ) -> Tuple: '''simple docstring''' if self.graph.get(lowercase ): if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: A__ = [[w, v]] if not self.graph.get(lowercase ): A__ = [] def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' return list(self.graph ) def UpperCamelCase ( self , lowercase , lowercase ) -> int: '''simple docstring''' if self.graph.get(lowercase ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(lowercase ) def UpperCamelCase ( self , lowercase=-2 , lowercase=-1 ) -> Any: '''simple docstring''' if s == d: return [] A__ = [] A__ = [] if s == -2: A__ = list(self.graph )[0] stack.append(lowercase ) visited.append(lowercase ) A__ = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A__ = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(lowercase ) return visited else: stack.append(node[1] ) visited.append(node[1] ) A__ = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(lowercase ) != 0: A__ = stack[len(lowercase ) - 1] else: A__ = ss # check if se have reached the starting point if len(lowercase ) == 0: return visited def UpperCamelCase ( self , lowercase=-1 ) -> Optional[Any]: '''simple docstring''' if c == -1: A__ = floor(random() * 10000 ) + 10 for i in range(lowercase ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): A__ = floor(random() * c ) + 1 if n != i: self.add_pair(lowercase , lowercase , 1 ) def UpperCamelCase ( self , lowercase=-2 ) -> Any: '''simple docstring''' A__ = deque() A__ = [] if s == -2: A__ = list(self.graph )[0] d.append(lowercase ) visited.append(lowercase ) while d: A__ = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def UpperCamelCase ( self , lowercase ) -> Tuple: '''simple docstring''' A__ = 0 for x in self.graph: for y in self.graph[x]: if y[1] == u: count += 1 return count def UpperCamelCase ( self , lowercase ) -> int: '''simple docstring''' return len(self.graph[u] ) def UpperCamelCase ( self , lowercase=-2 ) -> str: '''simple docstring''' A__ = [] A__ = [] if s == -2: A__ = list(self.graph )[0] stack.append(lowercase ) visited.append(lowercase ) A__ = s A__ = [] while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A__ = s for node in self.graph[s]: if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) A__ = node[1] break # check if all the children are visited if s == ss: sorted_nodes.append(stack.pop() ) if len(lowercase ) != 0: A__ = stack[len(lowercase ) - 1] else: A__ = ss # check if se have reached the starting point if len(lowercase ) == 0: return sorted_nodes def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' A__ = [] A__ = [] A__ = list(self.graph )[0] stack.append(lowercase ) visited.append(lowercase ) A__ = -2 A__ = [] A__ = s A__ = False A__ = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A__ = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): A__ = 
len(lowercase ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) A__ = node[1] break # check if all the children are visited if s == ss: stack.pop() A__ = True if len(lowercase ) != 0: A__ = stack[len(lowercase ) - 1] else: A__ = False indirect_parents.append(lowercase ) A__ = s A__ = ss # check if se have reached the starting point if len(lowercase ) == 0: return list(lowercase ) def UpperCamelCase ( self ) -> List[str]: '''simple docstring''' A__ = [] A__ = [] A__ = list(self.graph )[0] stack.append(lowercase ) visited.append(lowercase ) A__ = -2 A__ = [] A__ = s A__ = False A__ = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A__ = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): A__ = len(lowercase ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) A__ = node[1] break # check if all the children are visited if s == ss: stack.pop() A__ = True if len(lowercase ) != 0: A__ = stack[len(lowercase ) - 1] else: A__ = False indirect_parents.append(lowercase ) A__ = s A__ = ss # check if se have reached the starting point if len(lowercase ) == 0: return False def UpperCamelCase ( self , lowercase=-2 , lowercase=-1 ) -> Any: '''simple docstring''' A__ = time() self.dfs(lowercase , lowercase ) A__ = time() return end - begin def UpperCamelCase ( self , lowercase=-2 ) -> int: '''simple docstring''' A__ = time() self.bfs(lowercase ) A__ = time() return end - begin class a__ : """simple docstring""" def __init__( self ) -> int: '''simple docstring''' A__ = {} def UpperCamelCase ( self , lowercase , lowercase , lowercase=1 ) -> Union[str, Any]: '''simple docstring''' if self.graph.get(lowercase ): # if there already is a edge if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: # if u does not exist A__ = [[w, v]] # add the other way if self.graph.get(lowercase ): # if there already is a edge if self.graph[v].count([w, u] ) == 0: self.graph[v].append([w, u] ) else: # if u does not exist A__ = [[w, u]] def UpperCamelCase ( self , lowercase , lowercase ) -> Union[str, Any]: '''simple docstring''' if self.graph.get(lowercase ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(lowercase ) # the other way round if self.graph.get(lowercase ): for _ in self.graph[v]: if _[1] == u: self.graph[v].remove(lowercase ) def UpperCamelCase ( self , lowercase=-2 , lowercase=-1 ) -> List[str]: '''simple docstring''' if s == d: return [] A__ = [] A__ = [] if s == -2: A__ = list(self.graph )[0] stack.append(lowercase ) visited.append(lowercase ) A__ = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A__ = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(lowercase ) return visited else: stack.append(node[1] ) visited.append(node[1] ) A__ = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(lowercase ) != 0: A__ = stack[len(lowercase ) - 1] else: A__ = ss # check if se have reached the starting point if len(lowercase ) == 0: return visited def UpperCamelCase ( self , lowercase=-1 ) -> 
str: '''simple docstring''' if c == -1: A__ = floor(random() * 10000 ) + 10 for i in range(lowercase ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): A__ = floor(random() * c ) + 1 if n != i: self.add_pair(lowercase , lowercase , 1 ) def UpperCamelCase ( self , lowercase=-2 ) -> Dict: '''simple docstring''' A__ = deque() A__ = [] if s == -2: A__ = list(self.graph )[0] d.append(lowercase ) visited.append(lowercase ) while d: A__ = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def UpperCamelCase ( self , lowercase ) -> Tuple: '''simple docstring''' return len(self.graph[u] ) def UpperCamelCase ( self ) -> Dict: '''simple docstring''' A__ = [] A__ = [] A__ = list(self.graph )[0] stack.append(lowercase ) visited.append(lowercase ) A__ = -2 A__ = [] A__ = s A__ = False A__ = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A__ = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): A__ = len(lowercase ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) A__ = node[1] break # check if all the children are visited if s == ss: stack.pop() A__ = True if len(lowercase ) != 0: A__ = stack[len(lowercase ) - 1] else: A__ = False indirect_parents.append(lowercase ) A__ = s A__ = ss # check if se have reached the starting point if len(lowercase ) == 0: return list(lowercase ) def UpperCamelCase ( self ) -> int: '''simple docstring''' A__ = [] A__ = [] A__ = list(self.graph )[0] stack.append(lowercase ) visited.append(lowercase ) A__ = -2 A__ = [] A__ = s A__ = False A__ = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A__ = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): A__ = len(lowercase ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) A__ = node[1] break # check if all the children are visited if s == ss: stack.pop() A__ = True if len(lowercase ) != 0: A__ = stack[len(lowercase ) - 1] else: A__ = False indirect_parents.append(lowercase ) A__ = s A__ = ss # check if se have reached the starting point if len(lowercase ) == 0: return False def UpperCamelCase ( self ) -> List[str]: '''simple docstring''' return list(self.graph ) def UpperCamelCase ( self , lowercase=-2 , lowercase=-1 ) -> Optional[Any]: '''simple docstring''' A__ = time() self.dfs(lowercase , lowercase ) A__ = time() return end - begin def UpperCamelCase ( self , lowercase=-2 ) -> List[Any]: '''simple docstring''' A__ = time() self.bfs(lowercase ) A__ = time() return end - begin
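# An iterative depth-first traversal in the same spirit as the dfs methods
# above, written against a plain adjacency dict so it can be run standalone.
def dfs_sketch(graph, start):
    visited, stack = [], [start]
    while stack:
        node = stack.pop()
        if node not in visited:
            visited.append(node)
            stack.extend(reversed(graph.get(node, [])))  # keep left-to-right order
    return visited

assert dfs_sketch({1: [2, 3], 2: [4], 3: [], 4: []}, 1) == [1, 2, 4, 3]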
68
0
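The flattened Graph class above combines stack-based DFS, deque-based BFS, and cycle detection in one implementation. For reference, a minimal readable sketch of the two traversals it performs; the names adjacency, dfs, and bfs are illustrative, not from the original:

from collections import deque

def dfs(adjacency, start):
    # Iterative depth-first traversal with an explicit stack
    visited, stack, order = set(), [start], []
    while stack:
        node = stack.pop()
        if node in visited:
            continue
        visited.add(node)
        order.append(node)
        stack.extend(adjacency.get(node, []))
    return order

def bfs(adjacency, start):
    # Breadth-first traversal with a deque, as in the class above
    visited, queue, order = {start}, deque([start]), []
    while queue:
        node = queue.popleft()
        order.append(node)
        for neighbour in adjacency.get(node, []):
            if neighbour not in visited:
                visited.add(neighbour)
                queue.append(neighbour)
    return order

print(dfs({0: [1, 2], 1: [2], 2: []}, 0))  # [0, 2, 1]
print(bfs({0: [1, 2], 1: [2], 2: []}, 0))  # [0, 1, 2]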
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor


def data_handling(data):
    # Split the fetched dataset into features and target
    return (data["data"], data["target"])


def xgboost(features, target, test_features):
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main():
    data = fetch_california_housing()
    features, target = data_handling(data)
    x_train, x_test, y_train, y_test = train_test_split(
        features, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
258
import unicodedata from dataclasses import dataclass from typing import Optional, Union import numpy as np from transformers.data.data_collator import DataCollatorMixin from transformers.file_utils import PaddingStrategy from transformers.tokenization_utils_base import PreTrainedTokenizerBase def A ( _lowercase , _lowercase , _lowercase , _lowercase ): if isinstance(_lowercase , _lowercase ): SCREAMING_SNAKE_CASE : Any = np.full((len(_lowercase ), sequence_length, 2) , _lowercase ) else: SCREAMING_SNAKE_CASE : List[Any] = np.full((len(_lowercase ), sequence_length) , _lowercase ) for i, tensor in enumerate(_lowercase ): if padding_side == "right": if isinstance(_lowercase , _lowercase ): SCREAMING_SNAKE_CASE : Tuple = tensor[:sequence_length] else: SCREAMING_SNAKE_CASE : Any = tensor[:sequence_length] else: if isinstance(_lowercase , _lowercase ): SCREAMING_SNAKE_CASE : Any = tensor[:sequence_length] else: SCREAMING_SNAKE_CASE : Union[str, Any] = tensor[:sequence_length] return out_tensor.tolist() def A ( _lowercase ): SCREAMING_SNAKE_CASE : Union[str, Any] = ord(_lowercase ) if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126): return True SCREAMING_SNAKE_CASE : Optional[Any] = unicodedata.category(_lowercase ) if cat.startswith('''P''' ): return True return False @dataclass class lowercase__ ( UpperCamelCase_): UpperCamelCase_ = 42 UpperCamelCase_ = True UpperCamelCase_ = None UpperCamelCase_ = None UpperCamelCase_ = -100 UpperCamelCase_ = "pt" def __A ( self : Optional[int] , UpperCamelCase__ : List[Any] ): '''simple docstring''' import torch SCREAMING_SNAKE_CASE : str = '''label''' if '''label''' in features[0].keys() else '''labels''' SCREAMING_SNAKE_CASE : str = [feature[label_name] for feature in features] if label_name in features[0].keys() else None SCREAMING_SNAKE_CASE : Dict = self.tokenizer.pad( UpperCamelCase__ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , ) if labels is None: return batch SCREAMING_SNAKE_CASE : Tuple = torch.tensor(batch['''entity_ids'''] ).shape[1] SCREAMING_SNAKE_CASE : Tuple = self.tokenizer.padding_side if padding_side == "right": SCREAMING_SNAKE_CASE : int = [ list(UpperCamelCase__ ) + [self.label_pad_token_id] * (sequence_length - len(UpperCamelCase__ )) for label in labels ] else: SCREAMING_SNAKE_CASE : str = [ [self.label_pad_token_id] * (sequence_length - len(UpperCamelCase__ )) + list(UpperCamelCase__ ) for label in labels ] SCREAMING_SNAKE_CASE : List[str] = [feature['''ner_tags'''] for feature in features] SCREAMING_SNAKE_CASE : Dict = padding_tensor(UpperCamelCase__ , -1 , UpperCamelCase__ , UpperCamelCase__ ) SCREAMING_SNAKE_CASE : Any = [feature['''original_entity_spans'''] for feature in features] SCREAMING_SNAKE_CASE : Dict = padding_tensor(UpperCamelCase__ , (-1, -1) , UpperCamelCase__ , UpperCamelCase__ ) SCREAMING_SNAKE_CASE : Optional[Any] = {k: torch.tensor(UpperCamelCase__ , dtype=torch.intaa ) for k, v in batch.items()} return batch
258
1
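The data collator above pads label tensors and entity spans to a common sequence length. A minimal sketch of that right-/left-padding pattern, assuming numpy; pad_sequences and its argument names are illustrative:

import numpy as np

def pad_sequences(sequences, sequence_length, pad_value, padding_side="right"):
    # Pre-fill the output with the pad value, then copy each (truncated) row in
    out = np.full((len(sequences), sequence_length), pad_value)
    for i, seq in enumerate(sequences):
        seq = seq[:sequence_length]
        if padding_side == "right":
            out[i, : len(seq)] = seq
        else:
            out[i, -len(seq) :] = seq
    return out.tolist()

print(pad_sequences([[1, 2, 3], [4]], 5, -100))
# [[1, 2, 3, -100, -100], [4, -100, -100, -100, -100]]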
"""simple docstring""" import logging import os from .state import PartialState class lowercase__ ( logging.LoggerAdapter ): @staticmethod def UpperCAmelCase__ ( snake_case__ : List[str] ): lowerCamelCase_ : Dict =PartialState() return not main_process_only or (main_process_only and state.is_main_process) def UpperCAmelCase__ ( self : List[str] , snake_case__ : int , snake_case__ : int , *snake_case__ : Optional[Any] , **snake_case__ : int ): if PartialState._shared_state == {}: raise RuntimeError( "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility." ) lowerCamelCase_ : Tuple =kwargs.pop("main_process_only" , snake_case__ ) lowerCamelCase_ : Optional[Any] =kwargs.pop("in_order" , snake_case__ ) if self.isEnabledFor(snake_case__ ): if self._should_log(snake_case__ ): lowerCamelCase_ , lowerCamelCase_ : List[Any] =self.process(snake_case__ , snake_case__ ) self.logger.log(snake_case__ , snake_case__ , *snake_case__ , **snake_case__ ) elif in_order: lowerCamelCase_ : Optional[Any] =PartialState() for i in range(state.num_processes ): if i == state.process_index: lowerCamelCase_ , lowerCamelCase_ : Dict =self.process(snake_case__ , snake_case__ ) self.logger.log(snake_case__ , snake_case__ , *snake_case__ , **snake_case__ ) state.wait_for_everyone() def _snake_case ( lowerCamelCase__ : str , lowerCamelCase__ : str = None ) -> str: if log_level is None: lowerCamelCase_ : List[Any] =os.environ.get("ACCELERATE_LOG_LEVEL" , lowerCamelCase__ ) lowerCamelCase_ : str =logging.getLogger(lowerCamelCase__ ) if log_level is not None: logger.setLevel(log_level.upper() ) logger.root.setLevel(log_level.upper() ) return MultiProcessAdapter(lowerCamelCase__ , {} )
144
"""simple docstring""" import json import os from dataclasses import dataclass from functools import partial from typing import Callable import flax.linen as nn import jax import jax.numpy as jnp import joblib import optax import wandb from flax import jax_utils, struct, traverse_util from flax.serialization import from_bytes, to_bytes from flax.training import train_state from flax.training.common_utils import shard from tqdm.auto import tqdm from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule class lowercase__ ( snake_case__ ): _UpperCAmelCase :BigBirdConfig _UpperCAmelCase :jnp.dtype = jnp.floataa _UpperCAmelCase :bool = True def UpperCAmelCase__ ( self : Dict ): super().setup() lowerCamelCase_ : List[str] =nn.Dense(5 , dtype=self.dtype ) def __call__( self : Dict , *snake_case__ : Optional[int] , **snake_case__ : Any ): lowerCamelCase_ : int =super().__call__(*snake_case__ , **snake_case__ ) lowerCamelCase_ : Tuple =self.cls(outputs[2] ) return outputs[:2] + (cls_out,) class lowercase__ ( snake_case__ ): _UpperCAmelCase :List[str] = FlaxBigBirdForNaturalQuestionsModule def _snake_case ( lowerCamelCase__ : Tuple , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : int , lowerCamelCase__ : int ) -> List[str]: def cross_entropy(lowerCamelCase__ : int , lowerCamelCase__ : List[Any] , lowerCamelCase__ : int=None ): lowerCamelCase_ : List[str] =logits.shape[-1] lowerCamelCase_ : List[str] =(labels[..., None] == jnp.arange(lowerCamelCase__ )[None]).astype("f4" ) lowerCamelCase_ : str =jax.nn.log_softmax(lowerCamelCase__ , axis=-1 ) lowerCamelCase_ : Tuple =-jnp.sum(labels * logits , axis=-1 ) if reduction is not None: lowerCamelCase_ : str =reduction(lowerCamelCase__ ) return loss lowerCamelCase_ : int =partial(lowerCamelCase__ , reduction=jnp.mean ) lowerCamelCase_ : int =cross_entropy(lowerCamelCase__ , lowerCamelCase__ ) lowerCamelCase_ : Any =cross_entropy(lowerCamelCase__ , lowerCamelCase__ ) lowerCamelCase_ : List[str] =cross_entropy(lowerCamelCase__ , lowerCamelCase__ ) return (start_loss + end_loss + pooled_loss) / 3 @dataclass class lowercase__ : _UpperCAmelCase :str = "google/bigbird-roberta-base" _UpperCAmelCase :int = 3000 _UpperCAmelCase :int = 10500 _UpperCAmelCase :int = 128 _UpperCAmelCase :int = 3 _UpperCAmelCase :int = 1 _UpperCAmelCase :int = 5 # tx_args _UpperCAmelCase :float = 3e-5 _UpperCAmelCase :float = 0.0 _UpperCAmelCase :int = 20000 _UpperCAmelCase :float = 0.00_95 _UpperCAmelCase :str = "bigbird-roberta-natural-questions" _UpperCAmelCase :str = "training-expt" _UpperCAmelCase :str = "data/nq-training.jsonl" _UpperCAmelCase :str = "data/nq-validation.jsonl" def UpperCAmelCase__ ( self : Union[str, Any] ): os.makedirs(self.base_dir , exist_ok=snake_case__ ) lowerCamelCase_ : Tuple =os.path.join(self.base_dir , self.save_dir ) lowerCamelCase_ : Optional[Any] =self.batch_size_per_device * jax.device_count() @dataclass class lowercase__ : _UpperCAmelCase :int _UpperCAmelCase :int = 4096 # no dynamic padding on TPUs def __call__( self : List[str] , snake_case__ : List[str] ): lowerCamelCase_ : Optional[int] =self.collate_fn(snake_case__ ) lowerCamelCase_ : List[str] =jax.tree_util.tree_map(snake_case__ , snake_case__ ) return batch def UpperCAmelCase__ ( self : str , snake_case__ : Dict ): lowerCamelCase_ , lowerCamelCase_ : Union[str, Any] =self.fetch_inputs(features["input_ids"] ) 
lowerCamelCase_ : Dict ={ "input_ids": jnp.array(snake_case__ , dtype=jnp.intaa ), "attention_mask": jnp.array(snake_case__ , dtype=jnp.intaa ), "start_labels": jnp.array(features["start_token"] , dtype=jnp.intaa ), "end_labels": jnp.array(features["end_token"] , dtype=jnp.intaa ), "pooled_labels": jnp.array(features["category"] , dtype=jnp.intaa ), } return batch def UpperCAmelCase__ ( self : List[Any] , snake_case__ : list ): lowerCamelCase_ : Any =[self._fetch_inputs(snake_case__ ) for ids in input_ids] return zip(*snake_case__ ) def UpperCAmelCase__ ( self : int , snake_case__ : list ): lowerCamelCase_ : List[Any] =[1 for _ in range(len(snake_case__ ) )] while len(snake_case__ ) < self.max_length: input_ids.append(self.pad_id ) attention_mask.append(0 ) return input_ids, attention_mask def _snake_case ( lowerCamelCase__ : int , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[int]=None ) -> Optional[int]: if seed is not None: lowerCamelCase_ : Union[str, Any] =dataset.shuffle(seed=lowerCamelCase__ ) for i in range(len(lowerCamelCase__ ) // batch_size ): lowerCamelCase_ : Any =dataset[i * batch_size : (i + 1) * batch_size] yield dict(lowerCamelCase__ ) @partial(jax.pmap , axis_name="batch" ) def _snake_case ( lowerCamelCase__ : Dict , lowerCamelCase__ : List[Any] , **lowerCamelCase__ : Tuple ) -> int: def loss_fn(lowerCamelCase__ : Optional[int] ): lowerCamelCase_ : List[Any] =model_inputs.pop("start_labels" ) lowerCamelCase_ : Dict =model_inputs.pop("end_labels" ) lowerCamelCase_ : Any =model_inputs.pop("pooled_labels" ) lowerCamelCase_ : Tuple =state.apply_fn(**lowerCamelCase__ , params=lowerCamelCase__ , dropout_rng=lowerCamelCase__ , train=lowerCamelCase__ ) lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Any =outputs return state.loss_fn( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ) lowerCamelCase_ , lowerCamelCase_ : Optional[Any] =jax.random.split(lowerCamelCase__ ) lowerCamelCase_ : Union[str, Any] =jax.value_and_grad(lowerCamelCase__ ) lowerCamelCase_ , lowerCamelCase_ : Tuple =grad_fn(state.params ) lowerCamelCase_ : List[Any] =jax.lax.pmean({"loss": loss} , axis_name="batch" ) lowerCamelCase_ : int =jax.lax.pmean(lowerCamelCase__ , "batch" ) lowerCamelCase_ : List[Any] =state.apply_gradients(grads=lowerCamelCase__ ) return state, metrics, new_drp_rng @partial(jax.pmap , axis_name="batch" ) def _snake_case ( lowerCamelCase__ : List[str] , **lowerCamelCase__ : Union[str, Any] ) -> Dict: lowerCamelCase_ : Dict =model_inputs.pop("start_labels" ) lowerCamelCase_ : List[Any] =model_inputs.pop("end_labels" ) lowerCamelCase_ : Union[str, Any] =model_inputs.pop("pooled_labels" ) lowerCamelCase_ : Tuple =state.apply_fn(**lowerCamelCase__ , params=state.params , train=lowerCamelCase__ ) lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Union[str, Any] =outputs lowerCamelCase_ : int =state.loss_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) lowerCamelCase_ : str =jax.lax.pmean({"loss": loss} , axis_name="batch" ) return metrics class lowercase__ ( train_state.TrainState ): _UpperCAmelCase :Callable = struct.field(pytree_node=snake_case__ ) @dataclass class lowercase__ : _UpperCAmelCase :Args _UpperCAmelCase :Callable _UpperCAmelCase :Callable _UpperCAmelCase :Callable _UpperCAmelCase :Callable _UpperCAmelCase :wandb _UpperCAmelCase :Callable = None def UpperCAmelCase__ ( self : Union[str, Any] , snake_case__ : Optional[Any] , 
snake_case__ : Any , snake_case__ : List[str] , snake_case__ : str=None ): lowerCamelCase_ : int =model.params lowerCamelCase_ : Optional[Any] =TrainState.create( apply_fn=model.__call__ , params=snake_case__ , tx=snake_case__ , loss_fn=snake_case__ , ) if ckpt_dir is not None: lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Any =restore_checkpoint(snake_case__ , snake_case__ ) lowerCamelCase_ : Tuple ={ "lr": args.lr, "init_lr": args.init_lr, "warmup_steps": args.warmup_steps, "num_train_steps": num_train_steps, "weight_decay": args.weight_decay, } lowerCamelCase_ , lowerCamelCase_ : Tuple =build_tx(**snake_case__ ) lowerCamelCase_ : Union[str, Any] =train_state.TrainState( step=snake_case__ , apply_fn=model.__call__ , params=snake_case__ , tx=snake_case__ , opt_state=snake_case__ , ) lowerCamelCase_ : int =args lowerCamelCase_ : Union[str, Any] =data_collator lowerCamelCase_ : Dict =lr lowerCamelCase_ : Optional[Any] =params lowerCamelCase_ : Dict =jax_utils.replicate(snake_case__ ) return state def UpperCAmelCase__ ( self : Dict , snake_case__ : List[Any] , snake_case__ : int , snake_case__ : List[str] ): lowerCamelCase_ : str =self.args lowerCamelCase_ : List[Any] =len(snake_case__ ) // args.batch_size lowerCamelCase_ : Optional[int] =jax.random.PRNGKey(0 ) lowerCamelCase_ : Dict =jax.random.split(snake_case__ , jax.device_count() ) for epoch in range(args.max_epochs ): lowerCamelCase_ : int =jnp.array(0 , dtype=jnp.floataa ) lowerCamelCase_ : List[Any] =get_batched_dataset(snake_case__ , args.batch_size , seed=snake_case__ ) lowerCamelCase_ : Dict =0 for batch in tqdm(snake_case__ , total=snake_case__ , desc=F"""Running EPOCH-{epoch}""" ): lowerCamelCase_ : str =self.data_collator(snake_case__ ) lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Any =self.train_step_fn(snake_case__ , snake_case__ , **snake_case__ ) running_loss += jax_utils.unreplicate(metrics["loss"] ) i += 1 if i % args.logging_steps == 0: lowerCamelCase_ : Tuple =jax_utils.unreplicate(state.step ) lowerCamelCase_ : Optional[Any] =running_loss.item() / i lowerCamelCase_ : Any =self.scheduler_fn(state_step - 1 ) lowerCamelCase_ : Optional[Any] =self.evaluate(snake_case__ , snake_case__ ) lowerCamelCase_ : str ={ "step": state_step.item(), "eval_loss": eval_loss.item(), "tr_loss": tr_loss, "lr": lr.item(), } tqdm.write(str(snake_case__ ) ) self.logger.log(snake_case__ , commit=snake_case__ ) if i % args.save_steps == 0: self.save_checkpoint(args.save_dir + F"""-e{epoch}-s{i}""" , state=snake_case__ ) def UpperCAmelCase__ ( self : str , snake_case__ : Dict , snake_case__ : Union[str, Any] ): lowerCamelCase_ : List[Any] =get_batched_dataset(snake_case__ , self.args.batch_size ) lowerCamelCase_ : List[str] =len(snake_case__ ) // self.args.batch_size lowerCamelCase_ : Tuple =jnp.array(0 , dtype=jnp.floataa ) lowerCamelCase_ : Any =0 for batch in tqdm(snake_case__ , total=snake_case__ , desc="Evaluating ... " ): lowerCamelCase_ : Optional[Any] =self.data_collator(snake_case__ ) lowerCamelCase_ : List[str] =self.val_step_fn(snake_case__ , **snake_case__ ) running_loss += jax_utils.unreplicate(metrics["loss"] ) i += 1 return running_loss / i def UpperCAmelCase__ ( self : str , snake_case__ : Optional[int] , snake_case__ : Any ): lowerCamelCase_ : List[Any] =jax_utils.unreplicate(snake_case__ ) print(F"""SAVING CHECKPOINT IN {save_dir}""" , end=" ... 
" ) self.model_save_fn(snake_case__ , params=state.params ) with open(os.path.join(snake_case__ , "opt_state.msgpack" ) , "wb" ) as f: f.write(to_bytes(state.opt_state ) ) joblib.dump(self.args , os.path.join(snake_case__ , "args.joblib" ) ) joblib.dump(self.data_collator , os.path.join(snake_case__ , "data_collator.joblib" ) ) with open(os.path.join(snake_case__ , "training_state.json" ) , "w" ) as f: json.dump({"step": state.step.item()} , snake_case__ ) print("DONE" ) def _snake_case ( lowerCamelCase__ : List[str] , lowerCamelCase__ : Any ) -> List[Any]: print(F"""RESTORING CHECKPOINT FROM {save_dir}""" , end=" ... " ) with open(os.path.join(lowerCamelCase__ , "flax_model.msgpack" ) , "rb" ) as f: lowerCamelCase_ : Any =from_bytes(state.params , f.read() ) with open(os.path.join(lowerCamelCase__ , "opt_state.msgpack" ) , "rb" ) as f: lowerCamelCase_ : Optional[Any] =from_bytes(state.opt_state , f.read() ) lowerCamelCase_ : List[Any] =joblib.load(os.path.join(lowerCamelCase__ , "args.joblib" ) ) lowerCamelCase_ : int =joblib.load(os.path.join(lowerCamelCase__ , "data_collator.joblib" ) ) with open(os.path.join(lowerCamelCase__ , "training_state.json" ) , "r" ) as f: lowerCamelCase_ : Optional[Any] =json.load(lowerCamelCase__ ) lowerCamelCase_ : Optional[Any] =training_state["step"] print("DONE" ) return params, opt_state, step, args, data_collator def _snake_case ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Any ) -> str: lowerCamelCase_ : Dict =num_train_steps - warmup_steps lowerCamelCase_ : Optional[Any] =optax.linear_schedule(init_value=lowerCamelCase__ , end_value=lowerCamelCase__ , transition_steps=lowerCamelCase__ ) lowerCamelCase_ : List[Any] =optax.linear_schedule(init_value=lowerCamelCase__ , end_value=1e-7 , transition_steps=lowerCamelCase__ ) lowerCamelCase_ : Dict =optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] ) return lr def _snake_case ( lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[int] ) -> List[str]: def weight_decay_mask(lowerCamelCase__ : str ): lowerCamelCase_ : Union[str, Any] =traverse_util.flatten_dict(lowerCamelCase__ ) lowerCamelCase_ : Any ={k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()} return traverse_util.unflatten_dict(lowerCamelCase__ ) lowerCamelCase_ : Dict =scheduler_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) lowerCamelCase_ : List[str] =optax.adamw(learning_rate=lowerCamelCase__ , weight_decay=lowerCamelCase__ , mask=lowerCamelCase__ ) return tx, lr
144
1
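The Flax training script above assembles its learning-rate schedule from optax primitives: a linear warmup joined to a linear decay. A readable restatement under the same optax API; the argument values below are illustrative:

import optax

def build_schedule(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    # Switch from warmup to decay at step `warmup_steps`
    return optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])

schedule = build_schedule(lr=3e-5, init_lr=0.0, warmup_steps=100, num_train_steps=1000)
print(schedule(0), schedule(100), schedule(1000))  # 0.0 at start, peak lr at step 100, ~1e-7 at the end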
import inspect import unittest import torch import torch.nn as nn from accelerate.hooks import ( AlignDevicesHook, ModelHook, SequentialHook, add_hook_to_module, attach_align_device_hook, remove_hook_from_module, remove_hook_from_submodules, ) from accelerate.test_utils import require_multi_gpu class UpperCamelCase ( nn.Module ): def __init__( self ): super().__init__() A__ = nn.Linear(3 , 4 ) A__ = nn.BatchNormad(4 ) A__ = nn.Linear(4 , 5 ) def __A ( self , UpperCAmelCase__ ): return self.lineara(self.batchnorm(self.lineara(UpperCAmelCase__ ) ) ) class UpperCamelCase ( _UpperCAmelCase ): def __A ( self , UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__ ): return (args[0] + 1,) + args[1:], kwargs class UpperCamelCase ( _UpperCAmelCase ): def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ ): return output + 1 class UpperCamelCase ( unittest.TestCase ): def __A ( self ): A__ = ModelForTest() A__ = ModelHook() add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertEqual(test_model._hf_hook , UpperCAmelCase__ ) self.assertTrue(hasattr(UpperCAmelCase__ , "_old_forward" ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , "forward" ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["x"] ) remove_hook_from_module(UpperCAmelCase__ ) self.assertFalse(hasattr(UpperCAmelCase__ , "_hf_hook" ) ) self.assertFalse(hasattr(UpperCAmelCase__ , "_old_forward" ) ) def __A ( self ): A__ = ModelForTest() A__ = ModelHook() add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ ) add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ , append=UpperCAmelCase__ ) self.assertEqual(isinstance(test_model._hf_hook , UpperCAmelCase__ ) , UpperCAmelCase__ ) self.assertEqual(len(test_model._hf_hook.hooks ) , 2 ) self.assertTrue(hasattr(UpperCAmelCase__ , "_old_forward" ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , "forward" ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["x"] ) remove_hook_from_module(UpperCAmelCase__ ) self.assertFalse(hasattr(UpperCAmelCase__ , "_hf_hook" ) ) self.assertFalse(hasattr(UpperCAmelCase__ , "_old_forward" ) ) def __A ( self ): A__ = ModelForTest() A__ = torch.randn(2 , 3 ) A__ = test_model(x + 1 ) A__ = test_model(x + 2 ) A__ = PreForwardHook() add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ ) A__ = test_model(UpperCAmelCase__ ) self.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain A__ = PreForwardHook() add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ ) A__ = test_model(UpperCAmelCase__ ) self.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-5 ) ) # You need to use the sequential hook to chain two or more hooks A__ = SequentialHook(PreForwardHook() , PreForwardHook() ) add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ ) A__ = test_model(UpperCAmelCase__ ) assert torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-5 ) def __A ( self ): A__ = ModelForTest() A__ = torch.randn(2 , 3 ) A__ = test_model(UpperCAmelCase__ ) A__ = PostForwardHook() add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ ) A__ = test_model(UpperCAmelCase__ ) self.assertTrue(torch.allclose(UpperCAmelCase__ , output + 1 , atol=1e-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain A__ = PostForwardHook() 
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ ) A__ = test_model(UpperCAmelCase__ ) self.assertTrue(torch.allclose(UpperCAmelCase__ , output + 1 , atol=1e-5 ) ) # You need to use the sequential hook to chain two or more hooks A__ = SequentialHook(PostForwardHook() , PostForwardHook() ) add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ ) A__ = test_model(UpperCAmelCase__ ) assert torch.allclose(UpperCAmelCase__ , output + 2 , atol=1e-5 ) def __A ( self ): A__ = ModelForTest() A__ = torch.randn(2 , 3 ) A__ = test_model(UpperCAmelCase__ ) A__ = PostForwardHook() add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ ) A__ = test_model(UpperCAmelCase__ ) self.assertTrue(torch.allclose(UpperCAmelCase__ , output + 1 ) ) self.assertTrue(outputa.requires_grad ) A__ = True A__ = test_model(UpperCAmelCase__ ) self.assertFalse(outputa.requires_grad ) @require_multi_gpu def __A ( self ): A__ = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) # This will move each submodule on different devices add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) ) self.assertEqual(model.lineara.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) ) self.assertEqual(model.lineara.weight.device , torch.device(1 ) ) # We can still make a forward pass. The input does not need to be on any particular device A__ = torch.randn(2 , 3 ) A__ = model(UpperCAmelCase__ ) self.assertEqual(output.device , torch.device(1 ) ) # We can add a general hook to put back output on same device as input. add_hook_to_module(UpperCAmelCase__ , AlignDevicesHook(io_same_device=UpperCAmelCase__ ) ) A__ = torch.randn(2 , 3 ).to(0 ) A__ = model(UpperCAmelCase__ ) self.assertEqual(output.device , torch.device(0 ) ) def __A ( self ): A__ = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) # This will move each submodule on different devices A__ = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True} add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**UpperCAmelCase__ ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) ) self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) # Buffers are not included in the offload by default, so are on the execution device A__ = torch.device(hook_kwargs["execution_device"] ) self.assertEqual(model.batchnorm.running_mean.device , UpperCAmelCase__ ) A__ = torch.randn(2 , 3 ) A__ = model(UpperCAmelCase__ ) self.assertEqual(output.device , UpperCAmelCase__ ) # Removing hooks loads back the weights in the model. 
remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) # Now test with buffers included in the offload A__ = { "execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True, "offload_buffers": True, } add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**UpperCAmelCase__ ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) ) self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) ) A__ = torch.randn(2 , 3 ) A__ = model(UpperCAmelCase__ ) self.assertEqual(output.device , UpperCAmelCase__ ) # Removing hooks loads back the weights in the model. remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) def __A ( self ): A__ = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) # This will move each submodule on different devices A__ = 0 if torch.cuda.is_available() else "cpu" attach_align_device_hook(UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) ) self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) # Buffers are not included in the offload by default, so are on the execution device A__ = torch.device(UpperCAmelCase__ ) self.assertEqual(model.batchnorm.running_mean.device , UpperCAmelCase__ ) A__ = torch.randn(2 , 3 ) A__ = model(UpperCAmelCase__ ) self.assertEqual(output.device , UpperCAmelCase__ ) # Removing hooks loads back the weights in the model. 
remove_hook_from_submodules(UpperCAmelCase__ ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) # Now test with buffers included in the offload attach_align_device_hook(UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ , offload_buffers=UpperCAmelCase__ ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) ) self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) ) A__ = torch.randn(2 , 3 ) A__ = model(UpperCAmelCase__ ) self.assertEqual(output.device , UpperCAmelCase__ ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(UpperCAmelCase__ ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) def __A ( self ): A__ = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) # This will move each submodule on different devices A__ = 0 if torch.cuda.is_available() else "cpu" attach_align_device_hook( UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ , weights_map=model.state_dict() ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) ) self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) # Buffers are not included in the offload by default, so are on the execution device A__ = torch.device(UpperCAmelCase__ ) self.assertEqual(model.batchnorm.running_mean.device , UpperCAmelCase__ ) A__ = torch.randn(2 , 3 ) A__ = model(UpperCAmelCase__ ) self.assertEqual(output.device , UpperCAmelCase__ ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(UpperCAmelCase__ ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) # Now test with buffers included in the offload attach_align_device_hook( UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ , weights_map=model.state_dict() , offload_buffers=UpperCAmelCase__ , ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) ) self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) ) A__ = torch.randn(2 , 3 ) A__ = model(UpperCAmelCase__ ) self.assertEqual(output.device , UpperCAmelCase__ ) # Removing hooks loads back the weights in the model. 
remove_hook_from_submodules(UpperCAmelCase__ ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
198
import datasets from .evaluate import evaluate UpperCAmelCase_ : List[Any] = "\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n" UpperCAmelCase_ : Any = "\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n" UpperCAmelCase_ : Tuple = "\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the SQuAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]\n >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]\n >>> squad_metric = datasets.load_metric(\"squad\")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase ( datasets.Metric ): def __A ( self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": {"id": datasets.Value("string" ), "prediction_text": datasets.Value("string" )}, "references": { "id": datasets.Value("string" ), "answers": datasets.features.Sequence( { "text": datasets.Value("string" ), "answer_start": datasets.Value("int32" ), } ), }, } ) , codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , ) def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ ): A__ = {prediction["id"]: prediction["prediction_text"] for prediction in predictions} A__ = [ { "paragraphs": [ { "qas": [ { "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]], "id": ref["id"], } for ref in references ] } ] } ] A__ = evaluate(dataset=UpperCAmelCase__ , predictions=UpperCAmelCase__ ) return score
198
1
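The accelerate hooks exercised above wrap PyTorch's native module-hook API. A minimal sketch of the same pre-/post-forward pattern in plain torch; the +1 shifts mirror the test's PreForwardHook and PostForwardHook:

import torch
import torch.nn as nn

model = nn.Linear(3, 4)

def pre_hook(module, args):
    # Runs before forward; the returned tuple replaces the inputs
    return (args[0] + 1,)

def post_hook(module, args, output):
    # Runs after forward; the return value replaces the output
    return output + 1

model.register_forward_pre_hook(pre_hook)
model.register_forward_hook(post_hook)

x = torch.randn(2, 3)
print(model(x).shape)  # both hooks ran around the forward pass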
def bfs(graph, source, sink, parent):
    # Breadth-first search over the residual graph; records each vertex's
    # parent so the augmenting path can be walked back from the sink.
    visited = [False] * len(graph)
    queue = [source]
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]


def ford_fulkerson(graph, source, sink):
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        # Update residual capacities along the path
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))  # 23 for this classic example
204
"""simple docstring""" import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = (DPMSolverSinglestepScheduler,) UpperCamelCase__ = (("""num_inference_steps""", 25),) def lowercase__ ( self : Tuple , **__UpperCamelCase : Tuple )->Any: _UpperCAmelCase = { '''num_train_timesteps''': 1_0_0_0, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', '''solver_order''': 2, '''prediction_type''': '''epsilon''', '''thresholding''': False, '''sample_max_value''': 1.0, '''algorithm_type''': '''dpmsolver++''', '''solver_type''': '''midpoint''', '''lambda_min_clipped''': -float('''inf''' ), '''variance_type''': None, } config.update(**__UpperCamelCase ) return config def lowercase__ ( self : Dict , __UpperCamelCase : Tuple=0 , **__UpperCamelCase : Optional[int] )->Tuple: _UpperCAmelCase = dict(self.forward_default_kwargs ) _UpperCAmelCase = kwargs.pop('''num_inference_steps''' , __UpperCamelCase ) _UpperCAmelCase = self.dummy_sample _UpperCAmelCase = 0.1 * sample _UpperCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals _UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__UpperCamelCase ) _UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase ) new_scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals _UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] _UpperCAmelCase , _UpperCAmelCase = sample, sample for t in range(__UpperCamelCase , time_step + scheduler.config.solver_order + 1 ): _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample _UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def lowercase__ ( self : Any )->Union[str, Any]: pass def lowercase__ ( self : str , __UpperCamelCase : Tuple=0 , **__UpperCamelCase : List[Any] )->Dict: _UpperCAmelCase = dict(self.forward_default_kwargs ) _UpperCAmelCase = kwargs.pop('''num_inference_steps''' , __UpperCamelCase ) _UpperCAmelCase = self.dummy_sample _UpperCAmelCase = 0.1 * sample _UpperCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: _UpperCAmelCase = self.get_scheduler_config() _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals (must be after setting timesteps) _UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__UpperCamelCase ) _UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residual (must be after setting timesteps) _UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] _UpperCAmelCase = 
scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample _UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def lowercase__ ( self : int , __UpperCamelCase : List[str]=None , **__UpperCamelCase : Optional[int] )->List[Any]: if scheduler is None: _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = 1_0 _UpperCAmelCase = self.dummy_model() _UpperCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(__UpperCamelCase ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample return sample def lowercase__ ( self : List[Any] )->Dict: _UpperCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) _UpperCAmelCase = 5_0 _UpperCAmelCase = self.dummy_model() _UpperCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(__UpperCamelCase ) # make sure that the first t is uneven for i, t in enumerate(scheduler.timesteps[3:] ): _UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_5_7_4 ) < 1e-3 def lowercase__ ( self : Dict )->Dict: for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_configs(num_train_timesteps=__UpperCamelCase ) def lowercase__ ( self : str )->Optional[Any]: # make sure that iterating over schedulers with same config names gives same results # for defaults _UpperCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) _UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3 _UpperCAmelCase = DEISMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config ) _UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3 def lowercase__ ( self : Union[str, Any] )->int: self.check_over_configs(thresholding=__UpperCamelCase ) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=__UpperCamelCase , prediction_type=__UpperCamelCase , sample_max_value=__UpperCamelCase , algorithm_type='''dpmsolver++''' , solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , ) def lowercase__ ( self : str )->str: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__UpperCamelCase ) def lowercase__ ( self : List[Any] )->Tuple: for algorithm_type in ["dpmsolver", "dpmsolver++"]: for 
solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , algorithm_type=__UpperCamelCase , ) _UpperCAmelCase = self.full_loop( solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , algorithm_type=__UpperCamelCase , ) assert not torch.isnan(__UpperCamelCase ).any(), "Samples have nan numbers" def lowercase__ ( self : Dict )->List[str]: self.check_over_configs(lower_order_final=__UpperCamelCase ) self.check_over_configs(lower_order_final=__UpperCamelCase ) def lowercase__ ( self : Dict )->str: self.check_over_configs(lambda_min_clipped=-float('''inf''' ) ) self.check_over_configs(lambda_min_clipped=-5.1 ) def lowercase__ ( self : List[str] )->int: self.check_over_configs(variance_type=__UpperCamelCase ) self.check_over_configs(variance_type='''learned_range''' ) def lowercase__ ( self : List[str] )->Union[str, Any]: for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_forward(num_inference_steps=__UpperCamelCase , time_step=0 ) def lowercase__ ( self : List[Any] )->int: _UpperCAmelCase = self.full_loop() _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3 def lowercase__ ( self : List[str] )->List[str]: _UpperCAmelCase = self.full_loop(use_karras_sigmas=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_2_4_8 ) < 1e-3 def lowercase__ ( self : int )->List[Any]: _UpperCAmelCase = self.full_loop(prediction_type='''v_prediction''' ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.1_4_5_3 ) < 1e-3 def lowercase__ ( self : Optional[Any] )->Dict: _UpperCAmelCase = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.0_6_4_9 ) < 1e-3 def lowercase__ ( self : Union[str, Any] )->List[str]: _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(thresholding=__UpperCamelCase , dynamic_thresholding_ratio=0 ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = 1_0 _UpperCAmelCase = self.dummy_model() _UpperCAmelCase = self.dummy_sample_deter.half() scheduler.set_timesteps(__UpperCamelCase ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample assert sample.dtype == torch.floataa
260
0
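The scheduler tests above all reduce to the same diffusers sampling loop. A minimal sketch, assuming diffusers is installed; the zero residual is a stand-in for a real denoising model:

import torch
from diffusers import DPMSolverSinglestepScheduler

scheduler = DPMSolverSinglestepScheduler()
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8)  # stand-in noisy sample
for t in scheduler.timesteps:
    residual = torch.zeros_like(sample)  # stand-in for model(sample, t)
    sample = scheduler.step(residual, t, sample).prev_sample
print(sample.shape)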
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    # First DFS pass: emit vertices in order of finishing time
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    # Second DFS pass on the reversed graph: collect one component
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
22
import warnings from ...utils import is_sklearn_available, requires_backends if is_sklearn_available(): from scipy.stats import pearsonr, spearmanr from sklearn.metrics import fa_score, matthews_corrcoef lowerCamelCase__ = ( '''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate ''' '''library. You can have a look at this example script for pointers: ''' '''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' ) def A(__a: str , __a: List[Any] ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) return (preds == labels).mean() def A(__a: Any , __a: Any ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) lowerCAmelCase_ = simple_accuracy(__a , __a ) lowerCAmelCase_ = fa_score(y_true=__a , y_pred=__a ) return { "acc": acc, "f1": fa, "acc_and_f1": (acc + fa) / 2, } def A(__a: List[str] , __a: Optional[int] ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) lowerCAmelCase_ = pearsonr(__a , __a )[0] lowerCAmelCase_ = spearmanr(__a , __a )[0] return { "pearson": pearson_corr, "spearmanr": spearman_corr, "corr": (pearson_corr + spearman_corr) / 2, } def A(__a: Union[str, Any] , __a: Any , __a: str ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) assert len(__a ) == len(__a ), F"Predictions and labels have mismatched lengths {len(__a )} and {len(__a )}" if task_name == "cola": return {"mcc": matthews_corrcoef(__a , __a )} elif task_name == "sst-2": return {"acc": simple_accuracy(__a , __a )} elif task_name == "mrpc": return acc_and_fa(__a , __a ) elif task_name == "sts-b": return pearson_and_spearman(__a , __a ) elif task_name == "qqp": return acc_and_fa(__a , __a ) elif task_name == "mnli": return {"mnli/acc": simple_accuracy(__a , __a )} elif task_name == "mnli-mm": return {"mnli-mm/acc": simple_accuracy(__a , __a )} elif task_name == "qnli": return {"acc": simple_accuracy(__a , __a )} elif task_name == "rte": return {"acc": simple_accuracy(__a , __a )} elif task_name == "wnli": return {"acc": simple_accuracy(__a , __a )} elif task_name == "hans": return {"acc": simple_accuracy(__a , __a )} else: raise KeyError(__a ) def A(__a: int , __a: Optional[Any] , __a: Optional[Any] ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) if len(__a ) != len(__a ): raise ValueError(F"Predictions and labels have mismatched lengths {len(__a )} and {len(__a )}" ) if task_name == "xnli": return {"acc": simple_accuracy(__a , __a )} else: raise KeyError(__a )
22
1
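A quick check of the Kosaraju implementation above on its two test graphs; the expected component lists below were traced by hand, and their ordering follows the reverse finishing order of the first DFS pass:

print(strongly_connected_components(test_graph_1))  # [[0, 1, 2], [3], [4]]
print(strongly_connected_components(test_graph_2))  # [[0, 2, 1], [3, 5, 4]]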
"""simple docstring""" import argparse import pathlib import fairseq import torch from fairseq.models.roberta import RobertaModel as FairseqRobertaModel from fairseq.modules import TransformerSentenceEncoderLayer from packaging import version from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.models.roberta.modeling_roberta import RobertaAttention from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse('1.0.0a'): raise Exception('requires fairseq >= 1.0.0a') logging.set_verbosity_info() UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__) UpperCAmelCase__ : List[str] = 'Hello world! cécé herlolip' def lowercase_ ( _snake_case ,_snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : int = FairseqRobertaModel.from_pretrained(_snake_case ) roberta.eval() # disable dropout SCREAMING_SNAKE_CASE__ : Any = roberta.model.encoder.sentence_encoder SCREAMING_SNAKE_CASE__ : Any = XLMRobertaConfig( vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings ,hidden_size=roberta.cfg.model.encoder_embed_dim ,num_hidden_layers=roberta.cfg.model.encoder_layers ,num_attention_heads=roberta.cfg.model.encoder_attention_heads ,intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim ,max_position_embeddings=514 ,type_vocab_size=1 ,layer_norm_eps=1E-5 ,) if classification_head: SCREAMING_SNAKE_CASE__ : Dict = roberta.model.classification_heads["""mnli"""].out_proj.weight.shape[0] print("""Our RoBERTa config:""" ,_snake_case ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = XLMRobertaXLForSequenceClassification(_snake_case ) if classification_head else XLMRobertaXLForMaskedLM(_snake_case ) model.eval() # Now let's copy all the weights. # Embeddings SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_sent_encoder.embed_tokens.weight SCREAMING_SNAKE_CASE__ : int = roberta_sent_encoder.embed_positions.weight SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them. 
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_sent_encoder.layer_norm.weight SCREAMING_SNAKE_CASE__ : Dict = roberta_sent_encoder.layer_norm.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer SCREAMING_SNAKE_CASE__ : BertLayer = model.roberta.encoder.layer[i] SCREAMING_SNAKE_CASE__ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i] SCREAMING_SNAKE_CASE__ : RobertaAttention = layer.attention SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn_layer_norm.weight SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.self_attn_layer_norm.bias # self attention SCREAMING_SNAKE_CASE__ : BertSelfAttention = layer.attention.self assert ( roberta_layer.self_attn.k_proj.weight.data.shape == roberta_layer.self_attn.q_proj.weight.data.shape == roberta_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ) SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.q_proj.weight SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.self_attn.q_proj.bias SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.self_attn.k_proj.weight SCREAMING_SNAKE_CASE__ : int = roberta_layer.self_attn.k_proj.bias SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.v_proj.weight SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.self_attn.v_proj.bias # self-attention output SCREAMING_SNAKE_CASE__ : BertSelfOutput = layer.attention.output assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.self_attn.out_proj.weight SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.out_proj.bias # this one is final layer norm SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.final_layer_norm.weight SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_layer.final_layer_norm.bias # intermediate SCREAMING_SNAKE_CASE__ : BertIntermediate = layer.intermediate assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.fca.weight SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.fca.bias # output SCREAMING_SNAKE_CASE__ : BertOutput = layer.output assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.fca.weight SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_layer.fca.bias # end of layer if classification_head: SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.classification_heads["""mnli"""].dense.weight SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta.model.classification_heads["""mnli"""].dense.bias SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta.model.classification_heads["""mnli"""].out_proj.weight SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta.model.classification_heads["""mnli"""].out_proj.bias else: # LM Head SCREAMING_SNAKE_CASE__ : str = roberta.model.encoder.lm_head.dense.weight SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.encoder.lm_head.dense.bias SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta.model.encoder.lm_head.layer_norm.weight SCREAMING_SNAKE_CASE__ : Dict = roberta.model.encoder.lm_head.layer_norm.bias SCREAMING_SNAKE_CASE__ : Optional[int] = roberta.model.encoder.lm_head.weight SCREAMING_SNAKE_CASE__ : List[str] = roberta.model.encoder.lm_head.bias # Let's check that we get the same results. 
SCREAMING_SNAKE_CASE__ : torch.Tensor = roberta.encode(_snake_case ).unsqueeze(0 ) # batch of size 1 SCREAMING_SNAKE_CASE__ : Tuple = model(_snake_case )[0] if classification_head: SCREAMING_SNAKE_CASE__ : Dict = roberta.model.classification_heads["""mnli"""](roberta.extract_features(_snake_case ) ) else: SCREAMING_SNAKE_CASE__ : Tuple = roberta.model(_snake_case )[0] print(our_output.shape ,their_output.shape ) SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.max(torch.abs(our_output - their_output ) ).item() print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7 SCREAMING_SNAKE_CASE__ : Tuple = torch.allclose(_snake_case ,_snake_case ,atol=1E-3 ) print("""Do both models output the same tensors?""" ,"""🔥""" if success else """💩""" ) if not success: raise Exception("""Something went wRoNg""" ) pathlib.Path(_snake_case ).mkdir(parents=_snake_case ,exist_ok=_snake_case ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(_snake_case ) if __name__ == "__main__": UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--classification_head', action='store_true', help='Whether to convert a final classification head.' ) UpperCAmelCase__ : Any = parser.parse_args() convert_xlm_roberta_xl_checkpoint_to_pytorch( args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( HubertConfig, HubertForCTC, HubertModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __lowercase = logging.get_logger(__name__) __lowercase = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): '''simple docstring''' for attribute in key.split('''.''' ): __UpperCamelCase :str = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if weight_type is not None: __UpperCamelCase :Any = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).shape else: __UpperCamelCase :Union[str, Any] = hf_pointer.shape assert hf_shape == value.shape, ( f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": __UpperCamelCase :str = value elif weight_type == "weight_g": __UpperCamelCase :List[str] = value elif weight_type == "weight_v": __UpperCamelCase :str = value elif weight_type == "bias": __UpperCamelCase :Union[str, Any] = value else: __UpperCamelCase :str = value logger.info(f"""{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCamelCase :List[Any] = [] __UpperCamelCase :int = fairseq_model.state_dict() __UpperCamelCase :List[Any] = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): __UpperCamelCase :List[Any] = False if "conv_layers" in name: load_conv_layer( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == '''group''' , ) __UpperCamelCase :List[str] = True else: for key, mapped_key in MAPPING.items(): __UpperCamelCase :Dict = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key if key in name or (key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0] and not is_finetuned): __UpperCamelCase :Optional[Any] = True if "*" in mapped_key: __UpperCamelCase :List[str] = name.split(SCREAMING_SNAKE_CASE )[0].split('''.''' )[-2] __UpperCamelCase :Optional[int] = mapped_key.replace('''*''' , SCREAMING_SNAKE_CASE ) if "weight_g" in name: __UpperCamelCase :int = '''weight_g''' elif "weight_v" in name: __UpperCamelCase :List[Any] = '''weight_v''' elif "weight" in name: __UpperCamelCase :Dict = '''weight''' elif "bias" in name: __UpperCamelCase :Dict = '''bias''' else: __UpperCamelCase :Dict = None set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) continue if not is_used: unused_weights.append(SCREAMING_SNAKE_CASE ) logger.warning(f"""Unused weights: {unused_weights}""" ) def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCamelCase :Tuple = full_name.split('''conv_layers.''' )[-1] __UpperCamelCase :Optional[int] = name.split('''.''' ) __UpperCamelCase :str = int(items[0] ) __UpperCamelCase :List[Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __UpperCamelCase :Dict = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __UpperCamelCase :Any = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) __UpperCamelCase :int = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) __UpperCamelCase :Union[str, Any] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(SCREAMING_SNAKE_CASE ) @torch.no_grad() def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True ): '''simple docstring''' if config_path is not None: __UpperCamelCase :Tuple = HubertConfig.from_pretrained(SCREAMING_SNAKE_CASE ) else: __UpperCamelCase :Optional[int] = HubertConfig() if is_finetuned: if dict_path: __UpperCamelCase :Optional[int] = Dictionary.load(SCREAMING_SNAKE_CASE ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __UpperCamelCase :Optional[int] = target_dict.pad_index __UpperCamelCase :Dict = target_dict.bos_index __UpperCamelCase :str = target_dict.eos_index __UpperCamelCase :Dict = len(target_dict.symbols ) __UpperCamelCase :List[Any] = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' ) if not os.path.isdir(SCREAMING_SNAKE_CASE ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(SCREAMING_SNAKE_CASE ) ) return os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE ) with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(target_dict.indices , SCREAMING_SNAKE_CASE ) __UpperCamelCase :Optional[int] = WavaVecaCTCTokenizer( SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=SCREAMING_SNAKE_CASE , ) __UpperCamelCase :Union[str, Any] = True if config.feat_extract_norm == '''layer''' else False __UpperCamelCase :Any = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , ) __UpperCamelCase :Any = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE ) processor.save_pretrained(SCREAMING_SNAKE_CASE ) __UpperCamelCase :List[str] = HubertForCTC(SCREAMING_SNAKE_CASE ) else: __UpperCamelCase :str = HubertModel(SCREAMING_SNAKE_CASE ) if is_finetuned: __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :int = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Any = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) __UpperCamelCase :Dict = model[0].eval() recursively_load_weights(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __lowercase = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, 
type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) __lowercase = parser.parse_args() convert_hubert_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
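# A hedged usage sketch for a converted checkpoint. "facebook/hubert-large-ls960-ft" is a
# published fine-tuned model; substitute the folder written by --pytorch_dump_folder_path
# to test your own conversion. One second of silence is enough to exercise the API.
import torch
from transformers import HubertForCTC, Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("facebook/hubert-large-ls960-ft")
model = HubertForCTC.from_pretrained("facebook/hubert-large-ls960-ft")

speech = torch.zeros(16_000)
inputs = processor(speech.numpy(), sampling_rate=16_000, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(processor.batch_decode(torch.argmax(logits, dim=-1)))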
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline  # noqa: F401


deprecate(
    'stable diffusion controlnet',
    '0.22.0',
    'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
    standard_warn=False,
    stacklevel=3,
)
'''simple docstring'''
UpperCAmelCase_ = [sum(int(c, 1_0) ** 2 for c in i.__str__()) for i in range(1_0_0_0_0_0)]


def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int ):
    '''simple docstring'''
    UpperCAmelCase__ = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000
    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
UpperCAmelCase_ = [None] * 1_0_0_0_0_0_0_0
UpperCAmelCase_ = True
UpperCAmelCase_ = False


def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int ):
    '''simple docstring'''
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    UpperCAmelCase__ = chain(next_number(SCREAMING_SNAKE_CASE__ ) )
    UpperCAmelCase__ = number_chain
    while number < 10000000:
        UpperCAmelCase__ = number_chain
        number *= 10
    return number_chain


def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int = 10000000 ):
    '''simple docstring'''
    for i in range(1 , SCREAMING_SNAKE_CASE__ ):
        if CHAINS[i] is None:
            chain(i + 1 )
    return CHAINS[:number].count(SCREAMING_SNAKE_CASE__ )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution() = }")
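# A worked example of one square-digit chain, independent of the lookup tables above:
# every starting value eventually reaches either 1 or 89 (Project Euler 92).
def sum_of_squared_digits(number: int) -> int:
    return sum(int(digit) ** 2 for digit in str(number))


n = 44
seen = [n]
while n not in (1, 89):
    n = sum_of_squared_digits(n)
    seen.append(n)
print(" -> ".join(map(str, seen)))  # 44 -> 32 -> 13 -> 10 -> 1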
def _UpperCAmelCase ( snake_case , snake_case ):
    """simple docstring"""
    _lowerCAmelCase = [1]
    for i in range(2 , snake_case ):
        factorials.append(factorials[-1] * i )
    assert 0 <= k < factorials[-1] * n, "k out of bounds"
    _lowerCAmelCase = []
    _lowerCAmelCase = list(range(snake_case ) )
    # Find permutation
    while factorials:
        _lowerCAmelCase = factorials.pop()
        _lowerCAmelCase , _lowerCAmelCase = divmod(snake_case , snake_case )
        permutation.append(elements[number] )
        elements.remove(elements[number] )
    permutation.append(elements[0] )
    return permutation


if __name__ == "__main__":
    import doctest

    doctest.testmod()
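# The function above encodes k in the factorial number system, but the identifier
# mangling collapsed its two parameters (and the factorials/permutation/elements names),
# so it cannot run as written. A readable equivalent; the (k, n) argument order is an
# assumption inferred from the `0 <= k < factorials[-1] * n` bound:
def kth_permutation(k: int, n: int) -> list:
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"
    permutation, elements = [], list(range(n))
    while factorials:
        number, k = divmod(k, factorials.pop())
        permutation.append(elements.pop(number))
    permutation.append(elements[0])
    return permutation


print(kth_permutation(4, 3))  # [2, 0, 1], the fifth permutation of [0, 1, 2]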
from typing import Optional import numpy as np import torch from torch import nn from transformers import GPTaConfig, GPTaLMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias'''] @register_to_config def __init__( self :List[Any] , snake_case :int , snake_case :int , snake_case :Optional[int] = None , snake_case :int = 50_257 , snake_case :int = 1_024 , snake_case :int = 768 , snake_case :int = 12 , snake_case :int = 12 , snake_case :Optional[int] = None , snake_case :str = "gelu_new" , snake_case :float = 0.1 , snake_case :float = 0.1 , snake_case :float = 0.1 , snake_case :float = 1e-5 , snake_case :float = 0.02 , snake_case :bool = True , snake_case :bool = True , snake_case :bool = False , snake_case :bool = False , ): '''simple docstring''' super().__init__() A_ : Tuple = prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and" f" `n_embd`: {n_embd} are not equal." ) A_ : List[Any] = prefix_inner_dim A_ : Union[str, Any] = prefix_hidden_dim A_ : List[str] = ( nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim ) if self.prefix_hidden_dim is not None else nn.Identity() ) A_ : List[Any] = ( nn.Linear(self.prefix_hidden_dim , snake_case ) if self.prefix_hidden_dim is not None else nn.Identity() ) A_ : List[Any] = GPTaConfig( vocab_size=snake_case , n_positions=snake_case , n_embd=snake_case , n_layer=snake_case , n_head=snake_case , n_inner=snake_case , activation_function=snake_case , resid_pdrop=snake_case , embd_pdrop=snake_case , attn_pdrop=snake_case , layer_norm_epsilon=snake_case , initializer_range=snake_case , scale_attn_weights=snake_case , use_cache=snake_case , scale_attn_by_inverse_layer_idx=snake_case , reorder_and_upcast_attn=snake_case , ) A_ : Optional[Any] = GPTaLMHeadModel(snake_case ) def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :torch.Tensor , snake_case :torch.Tensor , snake_case :Optional[torch.Tensor] = None , snake_case :Optional[torch.Tensor] = None , ): '''simple docstring''' A_ : Any = self.transformer.transformer.wte(snake_case ) A_ : str = self.encode_prefix(snake_case ) A_ : Union[str, Any] = self.decode_prefix(snake_case ) A_ : int = torch.cat((prefix_embeds, embedding_text) , dim=1 ) if labels is not None: A_ : Dict = self.get_dummy_token(input_ids.shape[0] , input_ids.device ) A_ : int = torch.cat((dummy_token, input_ids) , dim=1 ) A_ : Union[str, Any] = self.transformer(inputs_embeds=snake_case , labels=snake_case , attention_mask=snake_case ) if self.prefix_hidden_dim is not None: return out, hidden else: return out def SCREAMING_SNAKE_CASE ( self :str , snake_case :int , snake_case :torch.device ): '''simple docstring''' return torch.zeros(snake_case , self.prefix_length , dtype=torch.intaa , device=snake_case ) def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :int ): '''simple docstring''' return self.encode_prefix(snake_case ) @torch.no_grad() def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :Dict , snake_case :Optional[int] , snake_case :Any ): '''simple docstring''' A_ : Any = torch.split(snake_case , 1 , dim=0 ) A_ : Optional[int] = [] A_ : Union[str, Any] = [] for feature in features: A_ : Tuple = 
self.decode_prefix(feature.to(snake_case ) ) # back to the clip feature # Only support beam search for now A_ , A_ : Dict = self.generate_beam( input_embeds=snake_case , device=snake_case , eos_token_id=snake_case ) generated_tokens.append(output_tokens[0] ) generated_seq_lengths.append(seq_lengths[0] ) A_ : int = torch.stack(snake_case ) A_ : int = torch.stack(snake_case ) return generated_tokens, generated_seq_lengths @torch.no_grad() def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :int=None , snake_case :str=None , snake_case :int=None , snake_case :int = 5 , snake_case :int = 67 , snake_case :float = 1.0 , snake_case :Optional[int] = None , ): '''simple docstring''' A_ : Optional[Any] = eos_token_id A_ : List[Any] = None A_ : List[Any] = None A_ : str = torch.ones(snake_case , device=snake_case , dtype=torch.int ) A_ : Any = torch.zeros(snake_case , device=snake_case , dtype=torch.bool ) if input_embeds is not None: A_ : Any = input_embeds else: A_ : Optional[Any] = self.transformer.transformer.wte(snake_case ) for i in range(snake_case ): A_ : Optional[Any] = self.transformer(inputs_embeds=snake_case ) A_ : str = outputs.logits A_ : int = logits[:, -1, :] / (temperature if temperature > 0 else 1.0) A_ : List[str] = logits.softmax(-1 ).log() if scores is None: A_ , A_ : Union[str, Any] = logits.topk(snake_case , -1 ) A_ : Tuple = generated.expand(snake_case , *generated.shape[1:] ) A_ , A_ : str = next_tokens.permute(1 , 0 ), scores.squeeze(0 ) if tokens is None: A_ : Union[str, Any] = next_tokens else: A_ : List[str] = tokens.expand(snake_case , *tokens.shape[1:] ) A_ : Union[str, Any] = torch.cat((tokens, next_tokens) , dim=1 ) else: A_ : List[str] = -float(np.inf ) A_ : List[Any] = 0 A_ : Union[str, Any] = scores[:, None] + logits seq_lengths[~is_stopped] += 1 A_ : Optional[Any] = scores_sum / seq_lengths[:, None] A_ , A_ : List[str] = scores_sum_average.view(-1 ).topk(snake_case , -1 ) A_ : str = next_tokens // scores_sum.shape[1] A_ : Union[str, Any] = seq_lengths[next_tokens_source] A_ : Optional[int] = next_tokens % scores_sum.shape[1] A_ : Tuple = next_tokens.unsqueeze(1 ) A_ : Tuple = tokens[next_tokens_source] A_ : Dict = torch.cat((tokens, next_tokens) , dim=1 ) A_ : Dict = generated[next_tokens_source] A_ : Union[str, Any] = scores_sum_average * seq_lengths A_ : Optional[int] = is_stopped[next_tokens_source] A_ : Tuple = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 ) A_ : Union[str, Any] = torch.cat((generated, next_token_embed) , dim=1 ) A_ : Any = is_stopped + next_tokens.eq(snake_case ).squeeze() if is_stopped.all(): break A_ : int = scores / seq_lengths A_ : str = scores.argsort(descending=snake_case ) # tokens tensors are already padded to max_seq_length A_ : Dict = [tokens[i] for i in order] A_ : int = torch.stack(snake_case , dim=0 ) A_ : List[Any] = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype ) return output_texts, seq_lengths
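# The forward pass above projects a CLIP-style prefix into GPT-2's embedding space and
# prepends it to the token embeddings. A shape-only sketch of that concatenation with
# toy dimensions (no real model involved):
import torch

batch, prefix_length, seq_length, n_embd = 2, 10, 7, 16
prefix_embeds = torch.randn(batch, prefix_length, n_embd)  # decode_prefix(...) output
embedding_text = torch.randn(batch, seq_length, n_embd)    # transformer.wte(input_ids)
hidden = torch.cat((prefix_embeds, embedding_text), dim=1)
print(hidden.shape)  # torch.Size([2, 17, 16]); GPT-2 attends over prefix + text positions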
from __future__ import annotations class lowerCAmelCase__: '''simple docstring''' def __init__( self , __lowerCamelCase = 0 ) -> int: _SCREAMING_SNAKE_CASE : int = key def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> list[str]: assert isinstance(__lowerCamelCase , __lowerCamelCase ) and isinstance(__lowerCamelCase , __lowerCamelCase ) _SCREAMING_SNAKE_CASE : str = key or self.__key or 1 # make sure key is an appropriate size key %= 2_5_5 return [chr(ord(__lowerCamelCase ) ^ key ) for ch in content] def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> list[str]: assert isinstance(__lowerCamelCase , __lowerCamelCase ) and isinstance(__lowerCamelCase , __lowerCamelCase ) _SCREAMING_SNAKE_CASE : Any = key or self.__key or 1 # make sure key is an appropriate size key %= 2_5_5 return [chr(ord(__lowerCamelCase ) ^ key ) for ch in content] def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = 0 ) -> str: assert isinstance(__lowerCamelCase , __lowerCamelCase ) and isinstance(__lowerCamelCase , __lowerCamelCase ) _SCREAMING_SNAKE_CASE : Union[str, Any] = key or self.__key or 1 # make sure key can be any size while key > 2_5_5: key -= 2_5_5 # This will be returned _SCREAMING_SNAKE_CASE : Tuple = "" for ch in content: ans += chr(ord(__lowerCamelCase ) ^ key ) return ans def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = 0 ) -> str: assert isinstance(__lowerCamelCase , __lowerCamelCase ) and isinstance(__lowerCamelCase , __lowerCamelCase ) _SCREAMING_SNAKE_CASE : List[Any] = key or self.__key or 1 # make sure key can be any size while key > 2_5_5: key -= 2_5_5 # This will be returned _SCREAMING_SNAKE_CASE : int = "" for ch in content: ans += chr(ord(__lowerCamelCase ) ^ key ) return ans def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = 0 ) -> bool: assert isinstance(__lowerCamelCase , __lowerCamelCase ) and isinstance(__lowerCamelCase , __lowerCamelCase ) try: with open(__lowerCamelCase ) as fin, open("encrypt.out" , "w+" ) as fout: # actual encrypt-process for line in fin: fout.write(self.encrypt_string(__lowerCamelCase , __lowerCamelCase ) ) except OSError: return False return True def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> bool: assert isinstance(__lowerCamelCase , __lowerCamelCase ) and isinstance(__lowerCamelCase , __lowerCamelCase ) try: with open(__lowerCamelCase ) as fin, open("decrypt.out" , "w+" ) as fout: # actual encrypt-process for line in fin: fout.write(self.decrypt_string(__lowerCamelCase , __lowerCamelCase ) ) except OSError: return False return True # Tests # crypt = XORCipher() # key = 67 # # test encrypt # print(crypt.encrypt("hallo welt",key)) # # test decrypt # print(crypt.decrypt(crypt.encrypt("hallo welt",key), key)) # # test encrypt_string # print(crypt.encrypt_string("hallo welt",key)) # # test decrypt_string # print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key)) # if (crypt.encrypt_file("test.txt",key)): # print("encrypt successful") # else: # print("encrypt unsuccessful") # if (crypt.decrypt_file("encrypt.out",key)): # print("decrypt successful") # else: # print("decrypt unsuccessful")
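# XOR with the same key is an involution (x ^ key ^ key == x), which is why the paired
# encrypt/decrypt methods above share one body. A minimal round trip:
key = 67
plain = "hallo welt"
cipher = "".join(chr(ord(ch) ^ key) for ch in plain)
assert "".join(chr(ord(ch) ^ key) for ch in cipher) == plain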
from unittest.mock import Mock, patch

from file_transfer.send_file import send_file


@patch("socket.socket" )
@patch("builtins.open" )
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
    # ===== initialization =====
    _SCREAMING_SNAKE_CASE : List[Any] = Mock()
    _SCREAMING_SNAKE_CASE : Optional[Any] = conn, Mock()
    _SCREAMING_SNAKE_CASE : Dict = iter([1, None] )
    _SCREAMING_SNAKE_CASE : Optional[Any] = lambda __lowerCamelCase : next(__lowerCamelCase )
    # ===== invoke =====
    send_file(filename="mytext.txt", testing=__lowerCamelCase )
    # ===== assertions =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()
    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()
    conn.send.assert_called_once()
    conn.close.assert_called_once()
    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
from typing import List, Optional, Tuple, Union import torch from ...utils import logging, randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name class A ( lowercase_ ): def __init__(self , lowerCAmelCase , lowerCAmelCase ): super().__init__() self.register_modules(unet=__UpperCamelCase , scheduler=__UpperCamelCase ) @torch.no_grad() def __call__(self , lowerCAmelCase = 1 , lowerCAmelCase = 1_0_0 , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = True , ): if audio_length_in_s is None: __lowercase= self.unet.config.sample_size / self.unet.config.sample_rate __lowercase= audio_length_in_s * self.unet.config.sample_rate __lowercase= 2 ** len(self.unet.up_blocks ) if sample_size < 3 * down_scale_factor: raise ValueError( f'{audio_length_in_s} is too small. Make sure it\'s bigger or equal to' f' {3 * down_scale_factor / self.unet.config.sample_rate}.' ) __lowercase= int(__UpperCamelCase ) if sample_size % down_scale_factor != 0: __lowercase= ( (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 ) * down_scale_factor logger.info( f'{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled' f' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising' ' process.' ) __lowercase= int(__UpperCamelCase ) __lowercase= next(iter(self.unet.parameters() ) ).dtype __lowercase= (batch_size, self.unet.config.in_channels, sample_size) if isinstance(__UpperCamelCase , __UpperCamelCase ) and len(__UpperCamelCase ) != batch_size: raise ValueError( f'You have passed a list of generators of length {len(__UpperCamelCase )}, but requested an effective batch' f' size of {batch_size}. Make sure the batch size matches the length of the generators.' ) __lowercase= randn_tensor(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase ) # set step values self.scheduler.set_timesteps(__UpperCamelCase , device=audio.device ) __lowercase= self.scheduler.timesteps.to(__UpperCamelCase ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output __lowercase= self.unet(__UpperCamelCase , __UpperCamelCase ).sample # 2. compute previous image: x_t -> t_t-1 __lowercase= self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample __lowercase= audio.clamp(-1 , 1 ).float().cpu().numpy() __lowercase= audio[:, :, :original_sample_size] if not return_dict: return (audio,) return AudioPipelineOutput(audios=__UpperCamelCase )
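# A hedged usage sketch for an unconditional audio pipeline of this shape. The model id
# "harmonai/maestro-150k" is an assumption (a public Dance Diffusion checkpoint); any
# repo exposing a compatible 1D unet and scheduler should behave the same.
import scipy.io.wavfile
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("harmonai/maestro-150k")
output = pipe(audio_length_in_s=4.0, num_inference_steps=100)
audio = output.audios[0]  # (channels, samples) float array in [-1, 1]
scipy.io.wavfile.write("sample.wav", rate=pipe.unet.config.sample_rate, data=audio.T)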
"""simple docstring""" import argparse import torch from torch import nn from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration def A__ ( UpperCamelCase ): A = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "decoder.output_projection.weight", "_float_tensor", "encoder.embed_positions._float_tensor", "decoder.embed_positions._float_tensor", ] for k in ignore_keys: state_dict.pop(UpperCamelCase , UpperCamelCase ) def A__ ( UpperCamelCase ): A = list(s_dict.keys() ) for key in keys: if "transformer_layers" in key: A = s_dict.pop(UpperCamelCase ) elif "subsample" in key: A = s_dict.pop(UpperCamelCase ) def A__ ( UpperCamelCase ): A, A = emb.weight.shape A = nn.Linear(UpperCamelCase , UpperCamelCase , bias=UpperCamelCase ) A = emb.weight.data return lin_layer def A__ ( UpperCamelCase , UpperCamelCase ): A = torch.load(UpperCamelCase , map_location="cpu" ) A = mam_aaa["args"] A = mam_aaa["model"] A = state_dict["decoder.output_projection.weight"] remove_ignore_keys_(UpperCamelCase ) rename_keys(UpperCamelCase ) A = state_dict["decoder.embed_tokens.weight"].shape[0] A = args.share_decoder_input_output_embed A = [int(UpperCamelCase ) for i in args.conv_kernel_sizes.split("," )] A = SpeechaTextConfig( vocab_size=UpperCamelCase , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , num_conv_layers=len(UpperCamelCase ) , conv_channels=args.conv_channels , conv_kernel_sizes=UpperCamelCase , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=UpperCamelCase , num_beams=5 , max_length=200 , use_cache=UpperCamelCase , decoder_start_token_id=2 , early_stopping=UpperCamelCase , ) A = SpeechaTextForConditionalGeneration(UpperCamelCase ) A, A = model.model.load_state_dict(UpperCamelCase , strict=UpperCamelCase ) if len(UpperCamelCase ) > 0 and not set(UpperCamelCase ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing," F" but all the following weights are missing {missing}" ) if tie_embeds: A = make_linear_from_emb(model.model.decoder.embed_tokens ) else: A = lm_head_weights model.save_pretrained(UpperCamelCase ) if __name__ == "__main__": _snake_case : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.') parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') _snake_case : str = parser.parse_args() convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __A ={'configuration_glpn': ['GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GLPNConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A =['GLPNFeatureExtractor'] __A =['GLPNImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A =[ 'GLPN_PRETRAINED_MODEL_ARCHIVE_LIST', 'GLPNForDepthEstimation', 'GLPNLayer', 'GLPNModel', 'GLPNPreTrainedModel', ] if TYPE_CHECKING: from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_glpn import GLPNFeatureExtractor from .image_processing_glpn import GLPNImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_glpn import ( GLPN_PRETRAINED_MODEL_ARCHIVE_LIST, GLPNForDepthEstimation, GLPNLayer, GLPNModel, GLPNPreTrainedModel, ) else: import sys __A =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
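# The module above wires every public name through `_LazyModule` so that the torch and
# vision dependencies are only imported on first attribute access. A stripped-down sketch
# of that deferral idea (standard library only; not the real transformers implementation):
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        module = importlib.import_module(self._class_to_module[attr])
        return getattr(module, attr)


lazy = LazyModule("demo", {"json": ["dumps", "loads"]})
print(lazy.dumps({"deferred": True}))  # json is only imported on this line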
'''simple docstring''' import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class _snake_case ( unittest.TestCase ): def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=99 , _lowerCamelCase=32 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=16 , _lowerCamelCase=2 , _lowerCamelCase=0.02 , _lowerCamelCase=4 , ): UpperCAmelCase__ : Union[str, Any] = parent UpperCAmelCase__ : str = batch_size UpperCAmelCase__ : Tuple = seq_length UpperCAmelCase__ : Any = is_training UpperCAmelCase__ : Tuple = use_attention_mask UpperCAmelCase__ : Optional[Any] = use_token_type_ids UpperCAmelCase__ : Optional[Any] = use_labels UpperCAmelCase__ : str = vocab_size UpperCAmelCase__ : Dict = hidden_size UpperCAmelCase__ : int = num_hidden_layers UpperCAmelCase__ : Optional[int] = num_attention_heads UpperCAmelCase__ : Optional[int] = intermediate_size UpperCAmelCase__ : Tuple = hidden_act UpperCAmelCase__ : Optional[Any] = hidden_dropout_prob UpperCAmelCase__ : Union[str, Any] = attention_probs_dropout_prob UpperCAmelCase__ : List[Any] = max_position_embeddings UpperCAmelCase__ : Optional[int] = type_vocab_size UpperCAmelCase__ : str = type_sequence_label_size UpperCAmelCase__ : Optional[int] = initializer_range UpperCAmelCase__ : str = num_choices def snake_case__ ( self): UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) UpperCAmelCase__ : Union[str, Any] = None if self.use_attention_mask: UpperCAmelCase__ : Tuple = random_attention_mask([self.batch_size, self.seq_length]) UpperCAmelCase__ : Dict = None if self.use_token_type_ids: UpperCAmelCase__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) UpperCAmelCase__ : Optional[Any] = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def snake_case__ ( self): UpperCAmelCase__ : List[Any] = self.prepare_config_and_inputs() UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = config_and_inputs UpperCAmelCase__ : Dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class _snake_case ( a__ , unittest.TestCase ): lowerCAmelCase :Optional[int] = True lowerCAmelCase :Any = ( ( FlaxRoFormerModel, 
FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def snake_case__ ( self): UpperCAmelCase__ : List[str] = FlaxRoFormerModelTester(self) @slow def snake_case__ ( self): for model_class_name in self.all_model_classes: UpperCAmelCase__ : List[str] = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=_lowerCamelCase) UpperCAmelCase__ : Dict = model(np.ones((1, 1))) self.assertIsNotNone(_lowerCamelCase) @require_flax class _snake_case ( unittest.TestCase ): @slow def snake_case__ ( self): UpperCAmelCase__ : Any = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""") UpperCAmelCase__ : int = jnp.array([[0, 1, 2, 3, 4, 5]]) UpperCAmelCase__ : Optional[int] = model(_lowerCamelCase)[0] UpperCAmelCase__ : Union[str, Any] = 5_0000 UpperCAmelCase__ : Any = (1, 6, vocab_size) self.assertEqual(output.shape , _lowerCamelCase) UpperCAmelCase__ : Union[str, Any] = jnp.array( [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]) self.assertTrue(jnp.allclose(output[:, :3, :3] , _lowerCamelCase , atol=1e-4))
"""simple docstring""" def lowercase ( __snake_case : float , __snake_case : float ): return price * (1 + tax_rate) if __name__ == "__main__": print(F"""{price_plus_tax(100, 0.25) = }""") print(F"""{price_plus_tax(125.50, 0.05) = }""")
"""simple docstring""" def lowercase ( __snake_case : list[int] ): lowercase_ : List[Any] = len(__snake_case ) for i in range(__snake_case ): for j in range(i + 1 , __snake_case ): if numbers[j] < numbers[i]: lowercase_ , lowercase_ : Optional[int] = numbers[j], numbers[i] return numbers if __name__ == "__main__": __A : int = input('''Enter numbers separated by a comma:\n''').strip() __A : Any = [int(item) for item in user_input.split(''',''')] print(exchange_sort(unsorted))
import json import os import tempfile import datasets from utils import generate_example_dataset, get_duration SCREAMING_SNAKE_CASE : List[str] = 50000 SCREAMING_SNAKE_CASE : List[str] = 5000 SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = os.path.split(__file__) SCREAMING_SNAKE_CASE : List[Any] = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json")) @get_duration def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> str: for i in range(lowerCamelCase_ ): _lowercase : List[str] = dataset[i] @get_duration def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Any: for i in range(0 , len(lowerCamelCase_ ) , lowerCamelCase_ ): _lowercase : Optional[Any] = dataset[i : i + batch_size] @get_duration def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[Any]: with dataset.formatted_as(type=lowerCamelCase_ ): for i in range(lowerCamelCase_ ): _lowercase : Optional[Any] = dataset[i] @get_duration def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[Any]: with dataset.formatted_as(type=lowerCamelCase_ ): for i in range(0 , lowerCamelCase_ , lowerCamelCase_ ): _lowercase : Dict = dataset[i : i + batch_size] def UpperCamelCase_( ) -> int: _lowercase : Optional[Any] = {'num examples': SPEED_TEST_N_EXAMPLES} _lowercase : Union[str, Any] = [ (read, {'length': SMALL_TEST}), (read, {'length': SPEED_TEST_N_EXAMPLES}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1000}), (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}), (read_formatted, {'type': 'pandas', 'length': SMALL_TEST}), (read_formatted, {'type': 'torch', 'length': SMALL_TEST}), (read_formatted, {'type': 'tensorflow', 'length': SMALL_TEST}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1000}), ] _lowercase : Optional[Any] = [ (read, {'length': SMALL_TEST}), (read, {'length': SPEED_TEST_N_EXAMPLES}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1000}), (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1000}), ] with tempfile.TemporaryDirectory() as tmp_dir: print('generating dataset' ) _lowercase : Optional[int] = datasets.Features( {'list': datasets.Sequence(datasets.Value('float32' ) ), 'numbers': datasets.Value('float32' )} ) _lowercase : Dict = generate_example_dataset( os.path.join(lowerCamelCase_ , 'dataset.arrow' ) , lowerCamelCase_ , num_examples=lowerCamelCase_ , seq_shapes={'list': (100,)} , ) print('first set of iterations' ) for func, kwargs in functions: print(func.__name__ , str(lowerCamelCase_ ) ) _lowercase : Dict = func(lowerCamelCase_ , **lowerCamelCase_ ) print('shuffling dataset' ) _lowercase : List[Any] = dataset.shuffle() print('Second set of iterations (after shuffling' ) for func, kwargs in functions_shuffled: print('shuffled ' , func.__name__ , str(lowerCamelCase_ ) ) _lowercase : Optional[Any] = func( lowerCamelCase_ , **lowerCamelCase_ ) with open(lowerCamelCase_ , 'wb' ) as f: 
f.write(json.dumps(lowerCamelCase_ ).encode('utf-8' ) ) if __name__ == "__main__": # useful to run the profiler benchmark_iterating()
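# `get_duration` comes from a local utils module that is not shown here. A plausible
# sketch of such a decorator (an assumption -- the real helper may differ) that would
# make each benchmark above return its wall-clock time:
import functools
import time


def get_duration(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        func(*args, **kwargs)
        return time.perf_counter() - start

    return wrapper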
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) SCREAMING_SNAKE_CASE : List[Any] = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : Dict = ["DeiTFeatureExtractor"] SCREAMING_SNAKE_CASE : Dict = ["DeiTImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : str = [ "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST", "DeiTForImageClassification", "DeiTForImageClassificationWithTeacher", "DeiTForMaskedImageModeling", "DeiTModel", "DeiTPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : List[str] = [ "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFDeiTForImageClassification", "TFDeiTForImageClassificationWithTeacher", "TFDeiTForMaskedImageModeling", "TFDeiTModel", "TFDeiTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_deit import DeiTFeatureExtractor from .image_processing_deit import DeiTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deit import ( DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, DeiTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deit import ( TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, TFDeiTPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def UpperCamelCase ( snake_case__ : int | float | str ) -> tuple[int, int]:
    try:
        UpperCamelCase : Optional[Any] = float(snake_case__ )
    except ValueError:
        raise ValueError('Please enter a valid number' )
    UpperCamelCase : Optional[int] = decimal - int(snake_case__ )
    if fractional_part == 0:
        return int(snake_case__ ), 1
    else:
        UpperCamelCase : List[str] = len(str(snake_case__ ).split('.' )[1] )
        UpperCamelCase : List[str] = int(decimal * (10**number_of_frac_digits) )
        UpperCamelCase : Tuple = 10**number_of_frac_digits
        UpperCamelCase , UpperCamelCase : List[str] = denominator, numerator
        while True:
            UpperCamelCase : Any = dividend % divisor
            if remainder == 0:
                break
            UpperCamelCase , UpperCamelCase : Optional[Any] = divisor, remainder
        UpperCamelCase , UpperCamelCase : int = numerator / divisor, denominator / divisor
    return int(snake_case__ ), int(snake_case__ )


if __name__ == "__main__":
    print(F"""{decimal_to_fraction(2) = }""")
    print(F"""{decimal_to_fraction(89.0) = }""")
    print(F"""{decimal_to_fraction("67") = }""")
    print(F"""{decimal_to_fraction("45.0") = }""")
    print(F"""{decimal_to_fraction(1.5) = }""")
    print(F"""{decimal_to_fraction("6.25") = }""")
    print(F"""{decimal_to_fraction("78td") = }""")
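# The reduction loop above is Euclid's algorithm; the standard library reaches the same
# reduced fraction, which makes a handy cross-check:
from fractions import Fraction

assert Fraction("6.25") == Fraction(25, 4)
assert Fraction("45.0") == Fraction(45, 1)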
from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { '''facebook/s2t-wav2vec2-large-en-de''': ( '''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json''' ), # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2 } class lowerCAmelCase_ ( a__ ): UpperCAmelCase__ : Optional[Any] = "speech_to_text_2" UpperCAmelCase__ : List[Any] = ["past_key_values"] UpperCAmelCase__ : Any = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"} def __init__( self, SCREAMING_SNAKE_CASE_=1_0000, SCREAMING_SNAKE_CASE_=6, SCREAMING_SNAKE_CASE_=2048, SCREAMING_SNAKE_CASE_=4, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_="relu", SCREAMING_SNAKE_CASE_=256, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=0.02, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=0, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=1024, **SCREAMING_SNAKE_CASE_, ) -> int: UpperCamelCase : Optional[int] = vocab_size UpperCamelCase : List[str] = d_model UpperCamelCase : List[str] = decoder_ffn_dim UpperCamelCase : Optional[Any] = decoder_layers UpperCamelCase : Any = decoder_attention_heads UpperCamelCase : Tuple = dropout UpperCamelCase : str = attention_dropout UpperCamelCase : str = activation_dropout UpperCamelCase : Union[str, Any] = activation_function UpperCamelCase : Optional[int] = init_std UpperCamelCase : Tuple = decoder_layerdrop UpperCamelCase : Dict = use_cache UpperCamelCase : Any = decoder_layers UpperCamelCase : Union[str, Any] = scale_embedding # scale factor will be sqrt(d_model) if True UpperCamelCase : Union[str, Any] = max_target_positions super().__init__( pad_token_id=SCREAMING_SNAKE_CASE_, bos_token_id=SCREAMING_SNAKE_CASE_, eos_token_id=SCREAMING_SNAKE_CASE_, decoder_start_token_id=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, )
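# A small instantiation sketch using the name transformers exports for this class; note
# how the attribute_map above aliases the generic config names onto the decoder-specific
# ones:
from transformers import Speech2Text2Config

config = Speech2Text2Config(decoder_layers=4, d_model=512)
print(config.hidden_size)          # 512, resolved through the "d_model" alias
print(config.num_attention_heads)  # 4 by default, via "decoder_attention_heads"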
import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py lowerCAmelCase_ = """\ @INPROCEEDINGS{Papineni02bleu:a, author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu}, title = {BLEU: a Method for Automatic Evaluation of Machine Translation}, booktitle = {}, year = {2002}, pages = {311--318} } @inproceedings{lin-och-2004-orange, title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\", author = \"Lin, Chin-Yew and Och, Franz Josef\", booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\", month = \"aug 23{--}aug 27\", year = \"2004\", address = \"Geneva, Switzerland\", publisher = \"COLING\", url = \"https://www.aclweb.org/anthology/C04-1072\", pages = \"501--507\", } """ lowerCAmelCase_ = """\ BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another. Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation, the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and remains one of the most popular automated and inexpensive metrics. Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations. Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness are not taken into account[citation needed]. BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1 representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional reference translations will increase the BLEU score. """ lowerCAmelCase_ = """ Computes BLEU score of translated segments against one or more references. Args: predictions: list of translations to score. Each translation should be tokenized into a list of tokens. references: list of lists of references for each translation. Each reference should be tokenized into a list of tokens. max_order: Maximum n-gram order to use when computing BLEU score. smooth: Whether or not to apply Lin et al. 2004 smoothing. Returns: 'bleu': bleu score, 'precisions': geometric mean of n-gram precisions, 'brevity_penalty': brevity penalty, 'length_ratio': ratio of lengths, 'translation_length': translation_length, 'reference_length': reference_length Examples: >>> predictions = [ ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample ... ] >>> references = [ ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references) ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference) ... 
] >>> bleu = datasets.load_metric(\"bleu\") >>> results = bleu.compute(predictions=predictions, references=references) >>> print(results[\"bleu\"]) 1.0 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _lowerCAmelCase ( datasets.Metric ): '''simple docstring''' def UpperCamelCase_ ( self : str ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ), 'references': datasets.Sequence( datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ), } ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[ 'https://en.wikipedia.org/wiki/BLEU', 'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213', ] , ) def UpperCamelCase_ ( self : int , UpperCamelCase : int , UpperCamelCase : Tuple , UpperCamelCase : Optional[int]=4 , UpperCamelCase : Union[str, Any]=False ): '''simple docstring''' _snake_case : Optional[int] = compute_bleu( reference_corpus=UpperCamelCase , translation_corpus=UpperCamelCase , max_order=UpperCamelCase , smooth=UpperCamelCase ) ((_snake_case) , (_snake_case) , (_snake_case) , (_snake_case) , (_snake_case) , (_snake_case)) : Optional[Any] = score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
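# The two ingredients the metric returns, computed by hand for intuition: the brevity
# penalty is exp(1 - r/c) when the candidate (length c) is shorter than the references
# (length r), and the headline score is that penalty times the geometric mean of the
# n-gram precisions. The numbers below are made up for illustration.
import math

precisions = [0.8, 0.6, 0.4, 0.3]  # hypothetical 1- to 4-gram precisions
c, r = 9, 10                       # candidate and reference lengths
bp = 1.0 if c > r else math.exp(1 - r / c)
bleu = bp * math.exp(sum(math.log(p) for p in precisions) / len(precisions))
print(round(bp, 4), round(bleu, 4))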
import argparse from pathlib import Path from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: str , lowerCAmelCase: str , lowerCAmelCase: Path , lowerCAmelCase: str = None , lowerCAmelCase: str = None , lowerCAmelCase: str = None , )-> List[Any]: if config_name_or_path is None: _snake_case : int = 'facebook/rag-token-base' if model_type == 'rag_token' else 'facebook/rag-sequence-base' if generator_tokenizer_name_or_path is None: _snake_case : Optional[int] = generator_name_or_path if question_encoder_tokenizer_name_or_path is None: _snake_case : List[str] = question_encoder_name_or_path _snake_case : List[str] = RagTokenForGeneration if model_type == 'rag_token' else RagSequenceForGeneration # Save model. _snake_case : Any = RagConfig.from_pretrained(lowerCAmelCase ) _snake_case : Tuple = AutoConfig.from_pretrained(lowerCAmelCase ) _snake_case : Any = AutoConfig.from_pretrained(lowerCAmelCase ) _snake_case : int = gen_config _snake_case : Tuple = question_encoder_config _snake_case : int = model_class.from_pretrained_question_encoder_generator( lowerCAmelCase , lowerCAmelCase , config=lowerCAmelCase ) rag_model.save_pretrained(lowerCAmelCase ) # Sanity check. model_class.from_pretrained(lowerCAmelCase ) # Save tokenizers. _snake_case : int = AutoTokenizer.from_pretrained(lowerCAmelCase ) gen_tokenizer.save_pretrained(dest_dir / 'generator_tokenizer/' ) _snake_case : str = AutoTokenizer.from_pretrained(lowerCAmelCase ) question_encoder_tokenizer.save_pretrained(dest_dir / 'question_encoder_tokenizer/' ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument( """--model_type""", choices=["""rag_sequence""", """rag_token"""], required=True, type=str, help="""RAG model type: rag_sequence, rag_token""", ) parser.add_argument("""--dest""", type=str, required=True, help="""Path to the output checkpoint directory.""") parser.add_argument("""--generator_name_or_path""", type=str, required=True, help="""Generator model identifier""") parser.add_argument( """--question_encoder_name_or_path""", type=str, required=True, help="""Question encoder model identifier""" ) parser.add_argument( """--generator_tokenizer_name_or_path""", type=str, help="""Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``""", ) parser.add_argument( """--question_encoder_tokenizer_name_or_path""", type=str, help="""Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``""", ) parser.add_argument( """--config_name_or_path""", type=str, help=( """Identifier of the model config to use, if not provided, resolves to a base config for a given""" """ ``model_type``""" ), ) lowerCAmelCase_ = parser.parse_args() lowerCAmelCase_ = Path(args.dest) dest_dir.mkdir(exist_ok=True) consolidate( args.model_type, args.generator_name_or_path, args.question_encoder_name_or_path, dest_dir, args.config_name_or_path, args.generator_tokenizer_name_or_path, args.question_encoder_tokenizer_name_or_path, )
'''simple docstring''' import inspect import unittest from transformers import ViTMSNConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMSNForImageClassification, ViTMSNModel from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __UpperCAmelCase : '''simple docstring''' def __init__(self : int , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any]=13 , _lowerCAmelCase : Optional[Any]=30 , _lowerCAmelCase : str=2 , _lowerCAmelCase : List[Any]=3 , _lowerCAmelCase : int=True , _lowerCAmelCase : Dict=True , _lowerCAmelCase : Any=32 , _lowerCAmelCase : Tuple=5 , _lowerCAmelCase : Optional[int]=4 , _lowerCAmelCase : int=37 , _lowerCAmelCase : Dict="gelu" , _lowerCAmelCase : Tuple=0.1 , _lowerCAmelCase : Optional[int]=0.1 , _lowerCAmelCase : Any=10 , _lowerCAmelCase : Dict=0.02 , _lowerCAmelCase : int=None , ): A = parent A = batch_size A = image_size A = patch_size A = num_channels A = is_training A = use_labels A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = type_sequence_label_size A = initializer_range A = scope # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) A = (image_size // patch_size) ** 2 A = num_patches + 1 def A (self : str ): A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A = None if self.use_labels: A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A = self.get_config() return config, pixel_values, labels def A (self : List[str] ): return ViTMSNConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def A (self : List[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] ): A = ViTMSNModel(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() A = model(_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A (self : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int ): A = self.type_sequence_label_size A = ViTMSNForImageClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() A = model(_lowerCAmelCase , labels=_lowerCAmelCase ) print("""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" ) print("""Labels: {labels}""" ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images A = 1 A = ViTMSNForImageClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) 
model.eval() A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A = model(_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def A (self : List[str] ): A = self.prepare_config_and_inputs() A , A , A = config_and_inputs A = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class __UpperCAmelCase ( A__ , A__ , unittest.TestCase ): '''simple docstring''' __lowerCAmelCase = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else () __lowerCAmelCase = ( {'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification} if is_torch_available() else {} ) __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False def A (self : Optional[int] ): A = ViTMSNModelTester(self ) A = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 ) def A (self : Optional[Any] ): self.config_tester.run_common_tests() @unittest.skip(reason="""ViTMSN does not use inputs_embeds""" ) def A (self : str ): pass def A (self : Union[str, Any] ): A , A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(_lowerCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) ) def A (self : Union[str, Any] ): A , A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(_lowerCAmelCase ) A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A = [*signature.parameters.keys()] A = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _lowerCAmelCase ) def A (self : Union[str, Any] ): A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def A (self : Dict ): A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase ) @slow def A (self : Any ): for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A = ViTMSNModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) def __a ( ) ->Optional[int]: """simple docstring""" A = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class __UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def A (self : int ): return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None @slow def A (self : Tuple ): torch.manual_seed(2 ) A = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" ).to(_lowerCAmelCase ) A = self.default_image_processor A = prepare_img() A = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase ) # forward pass with torch.no_grad(): A = model(**_lowerCAmelCase ) # verify the logits A = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _lowerCAmelCase ) A = torch.tensor([-0.0_803, -0.4_454, -0.2_375] ).to(_lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


# This is the linear search that will occur after the search space has become smaller.
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Perform a linear search over array[left:right]. Returns -1 if target is not found."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search on a sorted array. Returns an index of target, or -1."""
    left = 0
    right = len(array) - 1  # inclusive bound; len(array) could index out of range below
    while left <= right:
        if right - left < precision:
            # `right` is inclusive, so the linear scan must go one past it
            return lin_search(left, right + 1, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search on a sorted array. Returns an index of target, or -1."""
    # `<=` (rather than `<`) so a single remaining element is still checked
    if left <= right:
        if right - left < precision:
            return lin_search(left, right + 1, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result_ite = ite_ternary_search(collection, target)
    result_rec = rec_ternary_search(0, len(collection) - 1, collection, target)

    if result_ite != -1:
        print(f"Iterative search: {target} found at position: {result_ite}")
        print(f"Recursive search: {target} found at position: {result_rec}")
    else:
        print("Not found")
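# A quick self-check (editorial addition, assuming the functions above are in
# scope): both variants should agree with list.index on a sorted list with
# distinct elements, and return -1 for an absent value.

sample = list(range(0, 200, 3))  # sorted input, as ternary search requires
for probe in (0, 57, 198, 5):    # 5 is absent, so both variants should return -1
    expected = sample.index(probe) if probe in sample else -1
    assert ite_ternary_search(sample, probe) == expected
    assert rec_ternary_search(0, len(sample) - 1, sample, probe) == expected
print("ternary search self-check passed")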
from typing import Dict, List

from nltk.translate import gleu_score

import datasets
from datasets import MetricInfo


_CITATION = """\
@misc{wu2016googles,
      title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
      author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
            and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
            Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
            Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
            Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
            and Jeffrey Dean},
      year={2016},
      eprint={1609.08144},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
"""

_DESCRIPTION = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""

_KWARGS_DESCRIPTION = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.

Args:
    predictions (list of str): list of translations to score.
        Each translation should be tokenized into a list of tokens.
    references (list of list of str): list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.

Returns:
    'google_bleu': google_bleu score

Examples:
    Example 1:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
        >>> print(round(results["google_bleu"], 2))
        0.44

    Example 2:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
        >>> print(round(results["google_bleu"], 2))
        0.61

    Example 3:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
        >>> print(round(results["google_bleu"], 2))
        0.53

    Example 4:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2, max_len=6)
        >>> print(round(results["google_bleu"], 2))
        0.4
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
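# Editorial sketch: the metric class above is a thin wrapper around NLTK, so the
# same number can be computed from nltk.translate.gleu_score directly. Assumes
# nltk is installed; the token lists mirror hyp2/ref2a from the docstring.

from nltk.translate import gleu_score

hyp = ["he", "read", "the", "book", "because", "he", "was", "interested", "in", "world", "history"]
ref = ["he", "was", "interested", "in", "world", "history", "because", "he", "read", "the", "book"]

# corpus_gleu takes one list of references per hypothesis
score = gleu_score.corpus_gleu(list_of_references=[[ref]], hypotheses=[hyp], min_len=1, max_len=4)
print(round(score, 2))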
"""simple docstring""" class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : List[Any] = name lowercase_ : int = val def __str__( self ) -> Tuple: '''simple docstring''' return f'''{self.__class__.__name__}({self.name}, {self.val})''' def __lt__( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' return self.val < other.val class UpperCamelCase : def __init__( self ,__UpperCamelCase ) -> Dict: '''simple docstring''' lowercase_ : Optional[int] = {} lowercase_ : Tuple = {} lowercase_ : Union[str, Any] = self.build_heap(__UpperCamelCase ) def __getitem__( self ,__UpperCamelCase ) -> int: '''simple docstring''' return self.get_value(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' return (idx - 1) // 2 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' return idx * 2 + 1 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' return idx * 2 + 2 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[Any]: '''simple docstring''' return self.heap_dict[key] def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' lowercase_ : Optional[int] = len(__UpperCamelCase ) - 1 lowercase_ : Optional[int] = self.get_parent_idx(__UpperCamelCase ) for idx, i in enumerate(__UpperCamelCase ): lowercase_ : Any = idx lowercase_ : str = i.val for i in range(__UpperCamelCase ,-1 ,-1 ): self.sift_down(__UpperCamelCase ,__UpperCamelCase ) return array def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' while True: lowercase_ : List[str] = self.get_left_child_idx(__UpperCamelCase ) # noqa: E741 lowercase_ : List[str] = self.get_right_child_idx(__UpperCamelCase ) lowercase_ : List[str] = idx if l < len(__UpperCamelCase ) and array[l] < array[idx]: lowercase_ : List[str] = l if r < len(__UpperCamelCase ) and array[r] < array[smallest]: lowercase_ : Dict = r if smallest != idx: lowercase_ , lowercase_ : Union[str, Any] = array[smallest], array[idx] ( ( lowercase_ ) , ( lowercase_ ) , ) : str = ( self.idx_of_element[array[smallest]], self.idx_of_element[array[idx]], ) lowercase_ : Any = smallest else: break def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : Dict = self.get_parent_idx(__UpperCamelCase ) while p >= 0 and self.heap[p] > self.heap[idx]: lowercase_ , lowercase_ : Any = self.heap[idx], self.heap[p] lowercase_ , lowercase_ : Tuple = ( self.idx_of_element[self.heap[idx]], self.idx_of_element[self.heap[p]], ) lowercase_ : int = p lowercase_ : str = self.get_parent_idx(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' return self.heap[0] def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ , lowercase_ : Optional[Any] = self.heap[-1], self.heap[0] lowercase_ , lowercase_ : Tuple = ( self.idx_of_element[self.heap[-1]], self.idx_of_element[self.heap[0]], ) lowercase_ : Tuple = self.heap.pop() del self.idx_of_element[x] self.sift_down(0 ,self.heap ) return x def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Dict: '''simple docstring''' self.heap.append(__UpperCamelCase ) lowercase_ : Tuple = len(self.heap ) - 1 lowercase_ : Optional[int] = node.val self.sift_up(len(self.heap ) - 1 ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' return len(self.heap ) == 0 def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> List[Any]: 
'''simple docstring''' assert ( self.heap[self.idx_of_element[node]].val > new_value ), "newValue must be less that current value" lowercase_ : Any = new_value lowercase_ : List[str] = new_value self.sift_up(self.idx_of_element[node] ) __SCREAMING_SNAKE_CASE =Node("R", -1) __SCREAMING_SNAKE_CASE =Node("B", 6) __SCREAMING_SNAKE_CASE =Node("A", 3) __SCREAMING_SNAKE_CASE =Node("X", 1) __SCREAMING_SNAKE_CASE =Node("E", 4) # Use one of these two ways to generate Min-Heap # Generating Min-Heap from array __SCREAMING_SNAKE_CASE =MinHeap([r, b, a, x, e]) # Generating Min-Heap by Insert method # myMinHeap.insert(a) # myMinHeap.insert(b) # myMinHeap.insert(x) # myMinHeap.insert(r) # myMinHeap.insert(e) # Before print("Min Heap - before decrease key") for i in my_min_heap.heap: print(i) print("Min Heap - After decrease key of node [B -> -17]") my_min_heap.decrease_key(b, -17) # After for i in my_min_heap.heap: print(i) if __name__ == "__main__": import doctest doctest.testmod()
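# Editorial sketch, assuming the Node/MinHeap classes above are in scope:
# draining the heap with remove() yields the vals in ascending order, which
# doubles as a heap-sort sanity check (duplicate vals are fine, since
# idx_of_element is keyed by Node identity).

nodes = [Node(name, val) for name, val in [("p", 5), ("q", 2), ("s", 9), ("t", 2)]]
heap = MinHeap(nodes)

drained = []
while not heap.is_empty():
    drained.append(heap.remove().val)
print(drained)  # expected: [2, 2, 5, 9]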
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """Return the two strings interleaved character by character."""
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    abs_length = first_str_length if first_str_length > second_str_length else second_str_length
    output_list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)


if __name__ == "__main__":
    print(alternative_string_arrange("AB", "XYZ"), end=" ")
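# Editorial examples: characters are interleaved until the shorter string runs
# out, after which the remainder of the longer string is appended.

assert alternative_string_arrange("ABCD", "XY") == "AXBYCD"
assert alternative_string_arrange("XY", "ABCD") == "XAYBCD"
assert alternative_string_arrange("ABC", "") == "ABC"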
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """
    Find the value of d <= digit for which the decimal expansion of
    numerator/d contains the longest recurring cycle (Project Euler problem 26).
    The cycle is detected by tracking remainders of the long division until one
    repeats.
    """
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
            now_divide = now_divide * 10 % divide_by_number

    return the_digit


# Tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
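# Editorial cross-check (the helper below is not part of the original file):
# after removing factors of 2 and 5, the cycle length of 1/d is the smallest k
# with 10**k ≡ 1 (mod d). For example, 1/7 = 0.(142857) has a six-digit cycle.

def cycle_length(d: int) -> int:
    while d % 2 == 0:
        d //= 2
    while d % 5 == 0:
        d //= 5
    if d == 1:
        return 0  # terminating decimal, no recurring cycle
    k, r = 1, 10 % d
    while r != 1:
        r = r * 10 % d
        k += 1
    return k

assert cycle_length(7) == 6
# both approaches should land on d = 983 for d < 1000
assert max(range(1, 1000), key=cycle_length) == solution(1, 999)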
def solution(length: int = 50) -> int:
    """
    Return the number of ways a row of the given length (in units) can be
    filled with unit squares and tiles of length two, three, and four
    (this matches Project Euler problem 117).
    """
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[row_length - tile_start - tile_length]

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
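# Editorial cross-check (not part of the original file): counting by the length
# of the last block gives the tetranacci-style recurrence
# f(n) = f(n-1) + f(n-2) + f(n-3) + f(n-4), f(0) = 1,
# which should agree with the table-filling solution above.

def solution_recurrence(length: int = 50) -> int:
    f = [1, 1, 2, 4]  # f(0)..f(3), computed by hand
    for n in range(4, length + 1):
        f.append(f[n - 1] + f[n - 2] + f[n - 3] + f[n - 4])
    return f[length]

assert all(solution_recurrence(n) == solution(n) for n in range(11))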
import argparse
import os
import re


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, {1})

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)


def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects


def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)


def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files


def check_dummies(overwrite=False):
    """Check that the dummy files are up to date and, if `overwrite`, regenerate them."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_dummies(args.fix_and_overwrite)
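# Editorial sketch, assuming the helpers above are in scope: what find_backend
# and create_dummy_object produce when run against literal strings instead of a
# diffusers checkout. The object names are illustrative examples only.

print(find_backend("    if not is_torch_available():"))  # -> torch
print(find_backend("    if not (is_torch_available() and is_transformers_available()):"))  # -> torch_and_transformers

# mixed-case name -> class template; lowercase name -> function template
print(create_dummy_object("UNet2DConditionModel", '["torch"]'))
print(create_dummy_object("logging", '["torch"]'))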